-rw-r--r--  Documentation/ABI/stable/sysfs-firmware-zynqmp | 103
-rw-r--r--  Documentation/ABI/testing/sysfs-class-fpga-bridge | 9
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-cortexa53-edac | 10
-rw-r--r--  Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/ata/ahci-ceva.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt | 156
-rw-r--r--  Documentation/devicetree/bindings/clock/silabs,si5324.txt | 78
-rw-r--r--  Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/crypto/zynqmp-sha.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/bridge.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt | 73
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt | 163
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt | 51
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt | 82
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/axi-cdma.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt | 38
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt | 67
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt | 39
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt | 91
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt | 123
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/cresample.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/dp.txt | 65
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt | 65
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/dsi.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/osd.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/sdi.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/vtc.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt | 162
-rw-r--r--  Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/edac/pl310_edac_l2.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/fpga/fpga-region.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-xilinx.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-zynq.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt | 159
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt | 56
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt | 122
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt | 66
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt | 141
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt | 62
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt | 63
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt | 64
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt | 95
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt | 75
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt | 164
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt | 55
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt | 66
-rw-r--r--  Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt | 93
-rw-r--r--  Documentation/devicetree/bindings/misc/jesd-phy.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/misc/jesd204b.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/misc/xlnx,fclk.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/mmc/arasan,sdhci.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/mtd/arasan_nand.txt | 33
-rw-r--r--  Documentation/devicetree/bindings/net/macb.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83867.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx-phy.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx_axienet.txt | 135
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx_emaclite.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx_tsn.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt | 90
-rw-r--r--  Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt | 87
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-zynqmp.txt | 119
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt | 275
-rw-r--r--  Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt | 135
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt | 36
-rw-r--r--  Documentation/devicetree/bindings/serial/uartlite.c | 26
-rw-r--r--  Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,i2s.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,spdif.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt | 60
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-xilinx.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/uio/xilinx_apm.txt | 44
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc3-xilinx.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc3.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/usb/ehci-xilinx.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/usb/udc-xilinx.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/video/xilinx-fb.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/xilinx.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/xlnx,ctrl-fb.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt | 21
-rw-r--r--  Documentation/devicetree/configfs-overlays.txt | 31
-rw-r--r--  Documentation/driver-api/dmaengine/provider.rst | 7
-rw-r--r--  Documentation/media/uapi/v4l/subdev-formats.rst | 872
-rw-r--r--  Documentation/misc-devices/xilinx_sdfec.rst | 291
-rw-r--r--  Documentation/misc-devices/xilinx_trafgen.txt | 87
-rw-r--r--  MAINTAINERS | 289
-rw-r--r--  arch/arm/boot/dts/zynq-7000.dtsi | 221
-rw-r--r--  arch/arm/boot/dts/zynq-cc108.dts | 41
-rw-r--r--  arch/arm/boot/dts/zynq-zc702.dts | 71
-rw-r--r--  arch/arm/boot/dts/zynq-zc706.dts | 54
-rw-r--r--  arch/arm/boot/dts/zynq-zc770-xm010.dts | 38
-rw-r--r--  arch/arm/boot/dts/zynq-zc770-xm011.dts | 42
-rw-r--r--  arch/arm/boot/dts/zynq-zc770-xm012.dts | 42
-rw-r--r--  arch/arm/boot/dts/zynq-zc770-xm013.dts | 39
-rw-r--r--  arch/arm/boot/dts/zynq-zed.dts | 51
-rw-r--r--  arch/arm/boot/dts/zynq-zturn.dts | 2
-rw-r--r--  arch/arm/boot/dts/zynq-zybo.dts | 12
-rw-r--r--  arch/arm/configs/xilinx_zynq_defconfig | 242
-rw-r--r--  arch/arm/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm/include/asm/smp.h | 3
-rw-r--r--  arch/arm/kernel/smp.c | 168
-rw-r--r--  arch/arm/mach-zynq/Kconfig | 16
-rw-r--r--  arch/arm/mach-zynq/Makefile | 6
-rw-r--r--  arch/arm/mach-zynq/common.c | 9
-rw-r--r--  arch/arm/mach-zynq/common.h | 26
-rw-r--r--  arch/arm/mach-zynq/efuse.c | 75
-rw-r--r--  arch/arm/mach-zynq/platsmp.c | 10
-rw-r--r--  arch/arm/mach-zynq/pm.c | 170
-rw-r--r--  arch/arm/mach-zynq/slcr.c | 47
-rw-r--r--  arch/arm/mach-zynq/suspend.S | 185
-rw-r--r--  arch/arm/mach-zynq/zynq_ocm.c | 245
-rw-r--r--  arch/arm64/Kconfig.platforms | 2
-rw-r--r--  arch/arm64/boot/dts/xilinx/Makefile | 3
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi | 313
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi | 46
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts | 35
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts | 32
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts | 32
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revB.dts | 100
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts | 309
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts | 382
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts | 62
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts | 47
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts | 331
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts | 332
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts | 445
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts | 1
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts | 316
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts | 547
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts | 379
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts | 322
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts | 245
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 622
-rw-r--r--  arch/arm64/configs/xilinx_versal_defconfig | 178
-rw-r--r--  arch/arm64/configs/xilinx_zynqmp_defconfig | 399
-rw-r--r--  arch/microblaze/Kconfig | 1
-rw-r--r--  arch/microblaze/configs/mmu_defconfig | 21
-rw-r--r--  arch/microblaze/configs/nommu_defconfig | 21
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/asm/irq.h | 1
-rw-r--r--  arch/microblaze/kernel/head.S | 2
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/microblaze/pci/pci-common.c | 32
-rw-r--r--  crypto/af_alg.c | 7
-rw-r--r--  crypto/algif_skcipher.c | 7
-rw-r--r--  crypto/blkcipher.c | 9
-rw-r--r--  crypto/skcipher.c | 18
-rw-r--r--  drivers/bluetooth/hci_ll.c | 1
-rw-r--r--  drivers/clk/Kconfig | 33
-rw-r--r--  drivers/clk/Makefile | 4
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 5
-rw-r--r--  drivers/clk/clk-si5324.c | 1227
-rw-r--r--  drivers/clk/clk-si5324.h | 140
-rw-r--r--  drivers/clk/clk.c | 28
-rw-r--r--  drivers/clk/idt/Makefile | 3
-rw-r--r--  drivers/clk/idt/clk-idt8t49n24x-core.c | 933
-rw-r--r--  drivers/clk/idt/clk-idt8t49n24x-core.h | 272
-rw-r--r--  drivers/clk/idt/clk-idt8t49n24x-debugfs.c | 382
-rw-r--r--  drivers/clk/idt/clk-idt8t49n24x-debugfs.h | 21
-rw-r--r--  drivers/clk/idt/clk-idt8t49n24x.c | 641
-rw-r--r--  drivers/clk/si5324.h | 68
-rw-r--r--  drivers/clk/si5324drv.c | 382
-rw-r--r--  drivers/clk/si5324drv.h | 100
-rw-r--r--  drivers/clk/zynq/clkc.c | 43
-rw-r--r--  drivers/clk/zynqmp/clkc.c | 6
-rw-r--r--  drivers/clk/zynqmp/divider.c | 63
-rw-r--r--  drivers/clk/zynqmp/pll.c | 9
-rw-r--r--  drivers/crypto/Kconfig | 30
-rw-r--r--  drivers/crypto/Makefile | 3
-rw-r--r--  drivers/crypto/zynqmp-aes.c | 336
-rw-r--r--  drivers/crypto/zynqmp-rsa.c | 240
-rw-r--r--  drivers/crypto/zynqmp-sha.c | 301
-rw-r--r--  drivers/dma/Kconfig | 14
-rw-r--r--  drivers/dma/dmaengine.c | 7
-rw-r--r--  drivers/dma/dmatest.c | 38
-rw-r--r--  drivers/dma/xilinx/Kconfig | 67
-rw-r--r--  drivers/dma/xilinx/Makefile | 8
-rw-r--r--  drivers/dma/xilinx/axidmatest.c | 664
-rw-r--r--  drivers/dma/xilinx/cdmatest.c | 662
-rw-r--r--  drivers/dma/xilinx/vdmatest.c | 662
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 292
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 2322
-rw-r--r--  drivers/dma/xilinx/xilinx_frmbuf.c | 1709
-rw-r--r--  drivers/dma/xilinx/xilinx_ps_pcie.h | 44
-rw-r--r--  drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c | 1402
-rw-r--r--  drivers/dma/xilinx/xilinx_ps_pcie_main.c | 200
-rw-r--r--  drivers/dma/xilinx/xilinx_ps_pcie_platform.c | 3170
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 94
-rw-r--r--  drivers/edac/Kconfig | 22
-rw-r--r--  drivers/edac/Makefile | 3
-rw-r--r--  drivers/edac/cortex_arm64_edac.c | 470
-rw-r--r--  drivers/edac/pl310_edac_l2.c | 233
-rw-r--r--  drivers/edac/zynqmp_ocm_edac.c | 651
-rw-r--r--  drivers/firmware/xilinx/Kconfig | 6
-rw-r--r--  drivers/firmware/xilinx/Makefile | 3
-rw-r--r--  drivers/firmware/xilinx/zynqmp-debug.c | 260
-rw-r--r--  drivers/firmware/xilinx/zynqmp-ggs.c | 289
-rw-r--r--  drivers/firmware/xilinx/zynqmp-secure.c | 197
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 910
-rw-r--r--  drivers/fpga/Kconfig | 34
-rw-r--r--  drivers/fpga/Makefile | 3
-rw-r--r--  drivers/fpga/fpga-bridge.c | 30
-rw-r--r--  drivers/fpga/fpga-mgr.c | 243
-rw-r--r--  drivers/fpga/fpga-region.c | 10
-rw-r--r--  drivers/fpga/versal-fpga.c | 201
-rw-r--r--  drivers/fpga/xilinx-afi.c | 92
-rw-r--r--  drivers/fpga/zynq-afi.c | 81
-rw-r--r--  drivers/fpga/zynqmp-fpga.c | 260
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 686
-rw-r--r--  drivers/gpio/gpio-zynq.c | 49
-rw-r--r--  drivers/gpu/drm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 42
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 31
-rw-r--r--  drivers/gpu/drm/xilinx/Kconfig | 59
-rw-r--r--  drivers/gpu/drm/xilinx/Makefile | 14
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_cresample.c | 154
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_cresample.h | 40
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_connector.c | 204
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_connector.h | 29
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_crtc.c | 595
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_crtc.h | 39
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_dp.c | 2186
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c | 2265
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h | 69
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_drv.c | 614
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_drv.h | 65
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_dsi.c | 808
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_encoder.c | 240
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_encoder.h | 28
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_fb.c | 516
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_fb.h | 38
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_gem.c | 45
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_gem.h | 25
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_plane.c | 1098
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_plane.h | 61
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_sdi.c | 1452
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_drm_sdi.h | 29
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_osd.c | 382
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_osd.h | 62
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c | 119
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h | 35
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_vtc.c | 645
-rw-r--r--  drivers/gpu/drm/xilinx/xilinx_vtc.h | 44
-rw-r--r--  drivers/gpu/drm/xlnx/Kconfig | 104
-rw-r--r--  drivers/gpu/drm/xlnx/Makefile | 21
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_bridge.c | 561
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_bridge.h | 178
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_crtc.c | 206
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_crtc.h | 76
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_csc.c | 571
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_drv.c | 540
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_drv.h | 33
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_dsi.c | 907
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_fb.c | 306
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_fb.h | 33
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_gem.c | 47
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_gem.h | 26
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_mixer.c | 2821
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_pl_disp.c | 618
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_scaler.c | 1748
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_sdi.c | 1227
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_sdi_modes.h | 356
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_sdi_timing.c | 425
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_sdi_timing.h | 20
-rw-r--r--  drivers/gpu/drm/xlnx/xlnx_vtc.c | 447
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.c | 3333
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_disp.h | 36
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.c | 1917
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.h | 38
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 194
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dpsub.h | 28
-rw-r--r--  drivers/gpu/drm/zocl/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/zocl/Makefile | 4
-rw-r--r--  drivers/gpu/drm/zocl/zocl_bo.c | 271
-rw-r--r--  drivers/gpu/drm/zocl/zocl_drv.c | 217
-rw-r--r--  drivers/gpu/drm/zocl/zocl_drv.h | 59
-rw-r--r--  drivers/hwmon/pmbus/pmbus.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 549
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 69
-rw-r--r--  drivers/iio/adc/Kconfig | 12
-rw-r--r--  drivers/iio/adc/Makefile | 1
-rw-r--r--  drivers/iio/adc/xilinx-ams.c | 1109
-rw-r--r--  drivers/iio/adc/xilinx-ams.h | 278
-rw-r--r--  drivers/iio/adc/xilinx-xadc-core.c | 137
-rw-r--r--  drivers/irqchip/Kconfig | 6
-rw-r--r--  drivers/irqchip/irq-gic.c | 17
-rw-r--r--  drivers/irqchip/irq-xilinx-intc.c | 143
-rw-r--r--  drivers/media/i2c/adv7511-v4l2.c | 531
-rw-r--r--  drivers/media/i2c/ov5640.c | 247
-rw-r--r--  drivers/media/media-entity.c | 31
-rw-r--r--  drivers/media/platform/xilinx/Kconfig | 134
-rw-r--r--  drivers/media/platform/xilinx/Makefile | 19
-rw-r--r--  drivers/media/platform/xilinx/xilinx-axis-switch.c | 588
-rw-r--r--  drivers/media/platform/xilinx/xilinx-cfa.c | 394
-rw-r--r--  drivers/media/platform/xilinx/xilinx-cresample.c | 447
-rw-r--r--  drivers/media/platform/xilinx/xilinx-csi2rxss.c | 2023
-rw-r--r--  drivers/media/platform/xilinx/xilinx-demosaic.c | 418
-rw-r--r--  drivers/media/platform/xilinx/xilinx-dma.c | 754
-rw-r--r--  drivers/media/platform/xilinx/xilinx-dma.h | 16
-rw-r--r--  drivers/media/platform/xilinx/xilinx-gamma-coeff.h | 5385
-rw-r--r--  drivers/media/platform/xilinx/xilinx-gamma.c | 543
-rw-r--r--  drivers/media/platform/xilinx/xilinx-hls-common.h | 36
-rw-r--r--  drivers/media/platform/xilinx/xilinx-hls.c | 481
-rw-r--r--  drivers/media/platform/xilinx/xilinx-m2m.c | 2106
-rw-r--r--  drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h | 574
-rw-r--r--  drivers/media/platform/xilinx/xilinx-multi-scaler.c | 2449
-rw-r--r--  drivers/media/platform/xilinx/xilinx-remapper.c | 546
-rw-r--r--  drivers/media/platform/xilinx/xilinx-rgb2yuv.c | 566
-rw-r--r--  drivers/media/platform/xilinx/xilinx-scaler.c | 708
-rw-r--r--  drivers/media/platform/xilinx/xilinx-scenechange-channel.c | 352
-rw-r--r--  drivers/media/platform/xilinx/xilinx-scenechange-dma.c | 554
-rw-r--r--  drivers/media/platform/xilinx/xilinx-scenechange.c | 191
-rw-r--r--  drivers/media/platform/xilinx/xilinx-scenechange.h | 234
-rw-r--r--  drivers/media/platform/xilinx/xilinx-sdirxss.c | 1855
-rw-r--r--  drivers/media/platform/xilinx/xilinx-switch.c | 460
-rw-r--r--  drivers/media/platform/xilinx/xilinx-tpg.c | 627
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vip.c | 177
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vip.h | 14
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vipp.c | 62
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vipp.h | 5
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vpss-csc.c | 1169
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vpss-scaler.c | 1878
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vtc.c | 18
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vtc.h | 1
-rw-r--r--  drivers/media/usb/uvc/uvc_queue.c | 13
-rw-r--r--  drivers/media/usb/uvc/uvc_video.c | 64
-rw-r--r--  drivers/media/usb/uvc/uvcvideo.h | 5
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c | 30
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c | 28
-rw-r--r--  drivers/misc/Kconfig | 25
-rw-r--r--  drivers/misc/Makefile | 3
-rw-r--r--  drivers/misc/jesd204b/Kconfig | 28
-rw-r--r--  drivers/misc/jesd204b/Makefile | 5
-rw-r--r--  drivers/misc/jesd204b/gtx7s_cpll_bands.c | 88
-rw-r--r--  drivers/misc/jesd204b/gtx7s_cpll_bands.h | 31
-rw-r--r--  drivers/misc/jesd204b/gtx7s_qpll_bands.c | 96
-rw-r--r--  drivers/misc/jesd204b/gtx7s_qpll_bands.h | 30
-rw-r--r--  drivers/misc/jesd204b/jesd_phy.c | 384
-rw-r--r--  drivers/misc/jesd204b/jesd_phy.h | 42
-rw-r--r--  drivers/misc/jesd204b/s7_gtxe2_drp.h | 123
-rw-r--r--  drivers/misc/jesd204b/xilinx_jesd204b.c | 399
-rw-r--r--  drivers/misc/jesd204b/xilinx_jesd204b.h | 135
-rw-r--r--  drivers/misc/xilinx_sdfec.c | 1683
-rw-r--r--  drivers/misc/xilinx_trafgen.c | 1494
-rw-r--r--  drivers/mmc/core/sd.c | 17
-rw-r--r--  drivers/mmc/host/sdhci-of-arasan.c | 629
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.h | 2
-rw-r--r--  drivers/mtd/chips/cfi_probe.c | 45
-rw-r--r--  drivers/mtd/devices/m25p80.c | 1
-rw-r--r--  drivers/mtd/nand/raw/Kconfig | 14
-rw-r--r--  drivers/mtd/nand/raw/Makefile | 2
-rw-r--r--  drivers/mtd/nand/raw/arasan_nand.c | 1465
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 15
-rw-r--r--  drivers/mtd/nand/raw/nand_onfi.c | 8
-rw-r--r--  drivers/mtd/nand/raw/nand_timings.c | 6
-rw-r--r--  drivers/mtd/nand/raw/pl353_nand.c | 1398
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 774
-rw-r--r--  drivers/net/can/xilinx_can.c | 310
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 37
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 344
-rw-r--r--  drivers/net/ethernet/cadence/macb_ptp.c | 21
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig | 79
-rw-r--r--  drivers/net/ethernet/xilinx/Makefile | 10
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 17
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 747
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_dma.c | 507
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2674
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c | 1043
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 75
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 61
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_cb.c | 177
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_ep.c | 161
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c | 223
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h | 159
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h | 88
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c | 325
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c | 369
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_qci.c | 151
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c | 232
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h | 151
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_switch.c | 807
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_switch.h | 364
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_tsn_timer.h | 73
-rw-r--r--  drivers/net/phy/Kconfig | 5
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/dp83867.c | 130
-rw-r--r--  drivers/net/phy/xilinx_phy.c | 160
-rw-r--r--  drivers/nvmem/zynqmp_nvmem.c | 184
-rw-r--r--  drivers/of/Kconfig | 7
-rw-r--r--  drivers/of/Makefile | 1
-rw-r--r--  drivers/of/configfs.c | 293
-rw-r--r--  drivers/of/dynamic.c | 173
-rw-r--r--  drivers/pci/Kconfig | 2
-rw-r--r--  drivers/pci/controller/Kconfig | 8
-rw-r--r--  drivers/pci/controller/Makefile | 1
-rw-r--r--  drivers/pci/controller/pcie-xdma-pl.c | 811
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 62
-rw-r--r--  drivers/phy/Kconfig | 9
-rw-r--r--  drivers/phy/Makefile | 1
-rw-r--r--  drivers/phy/phy-zynqmp.c | 1591
-rw-r--r--  drivers/pinctrl/Kconfig | 8
-rw-r--r--  drivers/pinctrl/Makefile | 1
-rw-r--r--  drivers/pinctrl/pinctrl-zynqmp.c | 1074
-rw-r--r--  drivers/remoteproc/Kconfig | 20
-rw-r--r--  drivers/remoteproc/Makefile | 2
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h | 23
-rw-r--r--  drivers/remoteproc/remoteproc_sysfs.c | 140
-rw-r--r--  drivers/remoteproc/zynq_remoteproc.c | 479
-rw-r--r--  drivers/remoteproc/zynqmp_r5_remoteproc.c | 966
-rw-r--r--  drivers/reset/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-zynqmp.c | 2
-rw-r--r--  drivers/soc/xilinx/Kconfig | 7
-rw-r--r--  drivers/soc/xilinx/Makefile | 4
-rw-r--r--  drivers/soc/xilinx/xlnx_vcu.c | 579
-rw-r--r--  drivers/soc/xilinx/xlnx_vcu_clk.c | 915
-rw-r--r--  drivers/soc/xilinx/xlnx_vcu_core.c | 168
-rw-r--r--  drivers/soc/xilinx/zynqmp/Makefile | 1
-rw-r--r--  drivers/soc/xilinx/zynqmp/tap_delays.c | 69
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c | 121
-rw-r--r--  drivers/spi/Kconfig | 15
-rw-r--r--  drivers/spi/spi-mem.c | 2
-rw-r--r--  drivers/spi/spi-xilinx.c | 1055
-rw-r--r--  drivers/spi/spi-zynq-qspi.c | 43
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c | 402
-rw-r--r--  drivers/spi/spi.c | 8
-rw-r--r--  drivers/staging/Kconfig | 14
-rw-r--r--  drivers/staging/Makefile | 6
-rw-r--r--  drivers/staging/apf/Kconfig | 19
-rw-r--r--  drivers/staging/apf/Makefile | 9
-rw-r--r--  drivers/staging/apf/dt-binding.txt | 17
-rw-r--r--  drivers/staging/apf/xilinx-dma-apf.c | 1232
-rw-r--r--  drivers/staging/apf/xilinx-dma-apf.h | 234
-rw-r--r--  drivers/staging/apf/xlnk-eng.c | 242
-rw-r--r--  drivers/staging/apf/xlnk-eng.h | 33
-rw-r--r--  drivers/staging/apf/xlnk-ioctl.h | 37
-rw-r--r--  drivers/staging/apf/xlnk-sysdef.h | 34
-rw-r--r--  drivers/staging/apf/xlnk.c | 1580
-rw-r--r--  drivers/staging/apf/xlnk.h | 175
-rw-r--r--  drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c | 398
-rw-r--r--  drivers/staging/clocking-wizard/dt-binding.txt | 4
-rw-r--r--  drivers/staging/fclk/Kconfig | 9
-rw-r--r--  drivers/staging/fclk/Makefile | 1
-rw-r--r--  drivers/staging/fclk/TODO | 2
-rw-r--r--  drivers/staging/fclk/dt-binding.txt | 16
-rw-r--r--  drivers/staging/fclk/xilinx_fclk.c | 125
-rw-r--r--  drivers/staging/xlnx_ctrl_driver/Kconfig | 15
-rw-r--r--  drivers/staging/xlnx_ctrl_driver/MAINTAINERS | 4
-rw-r--r--  drivers/staging/xlnx_ctrl_driver/Makefile | 2
-rw-r--r--  drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c | 290
-rw-r--r--  drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c | 595
-rw-r--r--  drivers/staging/xlnx_ernic/Kconfig | 4
-rw-r--r--  drivers/staging/xlnx_ernic/MAINTAINERS | 4
-rw-r--r--  drivers/staging/xlnx_ernic/Makefile | 7
-rw-r--r--  drivers/staging/xlnx_ernic/dt-binding.txt | 29
-rw-r--r--  drivers/staging/xlnx_ernic/xcm.c | 1962
-rw-r--r--  drivers/staging/xlnx_ernic/xcm.h | 170
-rw-r--r--  drivers/staging/xlnx_ernic/xcommon.h | 73
-rw-r--r--  drivers/staging/xlnx_ernic/xernic_bw_test.c | 482
-rw-r--r--  drivers/staging/xlnx_ernic/xhw_config.h | 26
-rw-r--r--  drivers/staging/xlnx_ernic/xhw_def.h | 641
-rw-r--r--  drivers/staging/xlnx_ernic/xif.h | 239
-rw-r--r--  drivers/staging/xlnx_ernic/xioctl.h | 24
-rw-r--r--  drivers/staging/xlnx_ernic/xmain.c | 1592
-rw-r--r--  drivers/staging/xlnx_ernic/xmain.h | 33
-rw-r--r--  drivers/staging/xlnx_ernic/xmr.c | 413
-rw-r--r--  drivers/staging/xlnx_ernic/xmr.h | 68
-rw-r--r--  drivers/staging/xlnx_ernic/xperftest.h | 33
-rw-r--r--  drivers/staging/xlnx_ernic/xqp.c | 1310
-rw-r--r--  drivers/staging/xlnx_ernic/xqp.h | 114
-rw-r--r--  drivers/staging/xlnx_ernic/xrocev2.h | 409
-rw-r--r--  drivers/staging/xlnxsync/Kconfig | 11
-rw-r--r--  drivers/staging/xlnxsync/MAINTAINERS | 4
-rw-r--r--  drivers/staging/xlnxsync/Makefile | 1
-rw-r--r--  drivers/staging/xlnxsync/dt-binding.txt | 34
-rw-r--r--  drivers/staging/xlnxsync/xlnxsync.c | 875
-rw-r--r--  drivers/staging/xroeframer/Kconfig | 18
-rw-r--r--  drivers/staging/xroeframer/Makefile | 12
-rw-r--r--  drivers/staging/xroeframer/README | 47
-rw-r--r--  drivers/staging/xroeframer/roe_framer_ctrl.h | 1088
-rw-r--r--  drivers/staging/xroeframer/sysfs_xroe.c | 562
-rw-r--r--  drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c | 718
-rw-r--r--  drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c | 571
-rw-r--r--  drivers/staging/xroeframer/sysfs_xroe_framer_stats.c | 401
-rw-r--r--  drivers/staging/xroeframer/sysfs_xroe_framer_udp.c | 181
-rw-r--r--  drivers/staging/xroeframer/xroe_framer.c | 155
-rw-r--r--  drivers/staging/xroeframer/xroe_framer.h | 63
-rw-r--r--  drivers/staging/xroetrafficgen/Kconfig | 14
-rw-r--r--  drivers/staging/xroetrafficgen/Makefile | 8
-rw-r--r--  drivers/staging/xroetrafficgen/README | 19
-rw-r--r--  drivers/staging/xroetrafficgen/roe_radio_ctrl.h | 183
-rw-r--r--  drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c | 824
-rw-r--r--  drivers/staging/xroetrafficgen/xroe-traffic-gen.c | 124
-rw-r--r--  drivers/staging/xroetrafficgen/xroe-traffic-gen.h | 15
-rw-r--r--  drivers/tty/hvc/hvc_dcc.c | 28
-rw-r--r--  drivers/tty/serial/8250/8250_of.c | 4
-rw-r--r--  drivers/tty/serial/Kconfig | 9
-rw-r--r--  drivers/tty/serial/uartlite.c | 265
-rw-r--r--  drivers/tty/serial/xilinx_uartps.c | 13
-rw-r--r--  drivers/uio/Kconfig | 24
-rw-r--r--  drivers/uio/Makefile | 4
-rw-r--r--  drivers/uio/uio.c | 43
-rw-r--r--  drivers/uio/uio_dmabuf.c | 210
-rw-r--r--  drivers/uio/uio_dmabuf.h | 26
-rw-r--r--  drivers/uio/uio_xilinx_ai_engine.c | 296
-rw-r--r--  drivers/uio/uio_xilinx_apm.c | 369
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_usb2.c | 5
-rw-r--r--  drivers/usb/chipidea/host.c | 9
-rw-r--r--  drivers/usb/chipidea/otg_fsm.c | 9
-rw-r--r--  drivers/usb/dwc3/Kconfig | 8
-rw-r--r--  drivers/usb/dwc3/Makefile | 10
-rw-r--r--  drivers/usb/dwc3/core.c | 233
-rw-r--r--  drivers/usb/dwc3/core.h | 97
-rw-r--r--  drivers/usb/dwc3/debugfs.c | 50
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c | 357
-rw-r--r--  drivers/usb/dwc3/ep0.c | 43
-rw-r--r--  drivers/usb/dwc3/gadget.c | 330
-rw-r--r--  drivers/usb/dwc3/gadget.h | 18
-rw-r--r--  drivers/usb/dwc3/gadget_hibernation.c | 567
-rw-r--r--  drivers/usb/dwc3/host.c | 23
-rw-r--r--  drivers/usb/dwc3/otg.c | 2199
-rw-r--r--  drivers/usb/dwc3/otg.h | 252
-rw-r--r--  drivers/usb/dwc3/platform_data.h | 54
-rw-r--r--  drivers/usb/gadget/configfs.c | 45
-rw-r--r--  drivers/usb/gadget/function/f_tcm.c | 311
-rw-r--r--  drivers/usb/gadget/function/tcm.h | 7
-rw-r--r--  drivers/usb/gadget/function/uvc_video.c | 5
-rw-r--r--  drivers/usb/gadget/udc/udc-xilinx.c | 81
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 12
-rw-r--r--  drivers/usb/host/xhci-hub.c | 7
-rw-r--r--  drivers/usb/host/xhci-mem.c | 14
-rw-r--r--  drivers/usb/host/xhci-plat.c | 59
-rw-r--r--  drivers/usb/host/xhci-ring.c | 107
-rw-r--r--  drivers/usb/host/xhci.c | 30
-rw-r--r--  drivers/usb/host/xhci.h | 5
-rw-r--r--  drivers/usb/phy/Kconfig | 1
-rw-r--r--  drivers/usb/phy/phy-ulpi.c | 99
-rw-r--r--  drivers/usb/storage/uas.c | 307
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 6
-rw-r--r--  drivers/watchdog/cadence_wdt.c | 6
-rw-r--r--  drivers/watchdog/of_xilinx_wdt.c | 302
-rw-r--r--  fs/ext4/super.c | 4
-rw-r--r--  include/crypto/if_alg.h | 2
-rw-r--r--  include/crypto/skcipher.h | 8
-rw-r--r--  include/drm/drm_encoder_slave.h | 24
-rw-r--r--  include/drm/drm_fourcc.h | 20
-rw-r--r--  include/dt-bindings/clock/xlnx-versal-clk.h | 123
-rw-r--r--  include/dt-bindings/drm/mipi-dsi.h | 11
-rw-r--r--  include/dt-bindings/media/xilinx-vip.h | 6
-rw-r--r--  include/dt-bindings/phy/phy.h | 2
-rw-r--r--  include/dt-bindings/pinctrl/pinctrl-zynqmp.h | 36
-rw-r--r--  include/dt-bindings/power/xlnx-versal-power.h | 40
-rw-r--r--  include/linux/clk-provider.h | 1
-rw-r--r--  include/linux/clk/zynq.h | 4
-rw-r--r--  include/linux/crypto.h | 12
-rw-r--r--  include/linux/dma/xilinx_frmbuf.h | 204
-rw-r--r--  include/linux/dma/xilinx_ps_pcie_dma.h | 69
-rw-r--r--  include/linux/dmaengine.h | 19
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h | 336
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 20
-rw-r--r--  include/linux/irqchip/arm-gic.h | 3
-rw-r--r--  include/linux/mailbox/zynqmp-ipi-message.h | 3
-rw-r--r--  include/linux/mtd/mtd.h | 3
-rw-r--r--  include/linux/mtd/onfi.h | 4
-rw-r--r--  include/linux/mtd/rawnand.h | 11
-rw-r--r--  include/linux/mtd/spi-nor.h | 43
-rw-r--r--  include/linux/of.h | 333
-rw-r--r--  include/linux/phy/phy-zynqmp.h | 60
-rw-r--r--  include/linux/remoteproc.h | 6
-rw-r--r--  include/linux/soc/xilinx/zynqmp/fw.h | 37
-rw-r--r--  include/linux/soc/xilinx/zynqmp/tap_delays.h | 32
-rw-r--r--  include/linux/spi/spi.h | 23
-rw-r--r--  include/linux/usb/chipidea.h | 3
-rw-r--r--  include/linux/usb/xhci_pdriver.h | 29
-rw-r--r--  include/linux/xilinx_phy.h | 20
-rw-r--r--  include/media/media-entity.h | 8
-rw-r--r--  include/media/v4l2-subdev.h | 4
-rw-r--r--  include/soc/xilinx/xlnx_vcu.h | 39
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 15
-rw-r--r--  include/uapi/drm/drm_mode.h | 2
-rw-r--r--  include/uapi/linux/if_alg.h | 1
-rw-r--r--  include/uapi/linux/media-bus-format.h | 16
-rw-r--r--  include/uapi/linux/uio/uio.h | 65
-rw-r--r--  include/uapi/linux/v4l2-mediabus.h | 3
-rw-r--r--  include/uapi/linux/v4l2-subdev.h | 23
-rw-r--r--  include/uapi/linux/videodev2.h | 29
-rw-r--r--  include/uapi/linux/xilinx-csi2rxss.h | 18
-rw-r--r--  include/uapi/linux/xilinx-hls.h | 21
-rw-r--r--  include/uapi/linux/xilinx-sdirxss.h | 66
-rw-r--r--  include/uapi/linux/xilinx-v4l2-controls.h | 141
-rw-r--r--  include/uapi/linux/xilinx-v4l2-events.h | 24
-rw-r--r--  include/uapi/linux/xlnx_ctrl.h | 34
-rw-r--r--  include/uapi/linux/xlnxsync.h | 111
-rw-r--r--  include/uapi/linux/zocl_ioctl.h | 125
-rw-r--r--  include/uapi/misc/xilinx_sdfec.h | 470
-rw-r--r--  kernel/debug/kdb/kdb_main.c | 8
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  net/tipc/bearer.c | 5
-rw-r--r--  samples/xilinx_apm/Makefile | 71
-rw-r--r--  samples/xilinx_apm/main.c | 134
-rw-r--r--  samples/xilinx_apm/xaxipmon.c | 1269
-rw-r--r--  samples/xilinx_apm/xaxipmon.h | 943
-rw-r--r--  sound/soc/xilinx/Kconfig | 35
-rw-r--r--  sound/soc/xilinx/Makefile | 6
-rw-r--r--  sound/soc/xilinx/xilinx-dp-card.c | 113
-rw-r--r--  sound/soc/xilinx/xilinx-dp-codec.c | 178
-rw-r--r--  sound/soc/xilinx/xilinx-dp-pcm.c | 76
-rw-r--r--  sound/soc/xilinx/xlnx_formatter_pcm.c | 226
-rw-r--r--  sound/soc/xilinx/xlnx_i2s.c | 115
-rw-r--r--  sound/soc/xilinx/xlnx_pl_snd_card.c | 432
-rw-r--r--  sound/soc/xilinx/xlnx_sdi_audio.c | 610
-rw-r--r--  sound/soc/xilinx/xlnx_snd_common.h | 23
655 files changed, 151241 insertions(+), 3459 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-firmware-zynqmp b/Documentation/ABI/stable/sysfs-firmware-zynqmp
new file mode 100644
index 000000000000..eeae291a048c
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-firmware-zynqmp
@@ -0,0 +1,103 @@
+What: /sys/firmware/zynqmp/ggs*
+Date: January 2018
+KernelVersion: 4.15.0
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU global general storage register value,
+ GLOBAL_GEN_STORAGE{0:3}.
+ These global general storage registers can be used
+ by the system to pass information between masters.
+
+ The registers are reset during system or power-on
+ resets. Three registers are used by the FSBL and
+ other Xilinx software products: GLOBAL_GEN_STORAGE{4:6}.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/ggs0
+ # echo <mask> <value> > /sys/firmware/zynqmp/ggs0
+
+ Example:
+ # cat /sys/firmware/zynqmp/ggs0
+ # echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/ggs0
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/pggs*
+Date: January 2018
+KernelVersion: 4.15.0
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU persistent global general storage register
+ value, PERS_GLOB_GEN_STORAGE{0:3}.
+ These persistent global general storage registers
+ can be used by the system to pass information
+ between masters.
+
+ The registers are reset only by a power-on reset
+ (POR) and maintain their values through a system
+ reset. Four registers are used by the FSBL and other
+ Xilinx software products: PERS_GLOB_GEN_STORAGE{4:7}.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/pggs0
+ # echo <mask> <value> > /sys/firmware/zynqmp/pggs0
+
+ Example:
+ # cat /sys/firmware/zynqmp/pggs0
+ # echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/pggs0
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/shutdown_scope
+Date: February 2018
+KernelVersion: 4.15.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ This sysfs interface allows setting the shutdown scope for the
+ next shutdown request. When the next shutdown is performed, the
+ platform specific portion of PSCI-system_off can use the chosen
+ shutdown scope.
+
+ The following shutdown scopes (subtypes) are available:
+
+ subsystem: Only the APU along with all of its peripherals
+ not used by other processing units will be
+ shut down. This may result in the FPD power
+ domain being shut down provided that no other
+ processing unit uses FPD peripherals or DRAM.
+ ps_only: The complete PS will be shut down, including the
+ RPU, PMU, etc. Only the PL domain (FPGA)
+ remains untouched.
+ system: The complete system/device is shut down.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/shutdown_scope
+ # echo <scope> > /sys/firmware/zynqmp/shutdown_scope
+
+ Example:
+ # cat /sys/firmware/zynqmp/shutdown_scope
+ # echo "subsystem" > /sys/firmware/zynqmp/shutdown_scope
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/health_status
+Date: April 2018
+KernelVersion: 4.14.0
+Contact: "Rajan Vaja" <rajanv@xilinx.com>
+Description:
+ This sysfs interface allows setting the health status. If PMUFW
+ is compiled with CHECK_HEALTHY_BOOT, it will check the healthy
+ bit on FPD WDT expiration. If the healthy bit was set by a user
+ application running in Linux, PMUFW will perform an APU-only
+ restart; if the healthy bit is not set when the FPD WDT expires,
+ PMUFW will perform a full system restart.
+
+ Usage:
+ Set healthy bit
+ # echo 1 > /sys/firmware/zynqmp/health_status
+
+ Unset healthy bit
+ # echo 0 > /sys/firmware/zynqmp/health_status
+
+Users: Xilinx
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge
index 312ae2c579d8..676700d7a61f 100644
--- a/Documentation/ABI/testing/sysfs-class-fpga-bridge
+++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge
@@ -9,3 +9,12 @@ Date: January 2016
KernelVersion: 4.5
Contact: Alan Tull <atull@opensource.altera.com>
Description: Show bridge state as "enabled" or "disabled"
+
+What: /sys/class/fpga_bridge/<bridge>/set
+Date: January 2017
+KernelVersion: 4.9
+Contact: Michal Simek <michal.simek@xilinx.com>
+Description: Manually set the bridge state (0 - disable, !0 - enable).
+ This attribute is only available when the module is
+ compiled with #define DEBUG, which is enabled by default
+ when CONFIG_DEBUG_KERNEL is set.
diff --git a/Documentation/ABI/testing/sysfs-driver-cortexa53-edac b/Documentation/ABI/testing/sysfs-driver-cortexa53-edac
new file mode 100644
index 000000000000..87ed5ca3af22
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-cortexa53-edac
@@ -0,0 +1,10 @@
+What: /sys/devices/system/edac/cpu_cache/inject_(L1/L2)_Cache_Error
+Date: June 2016
+Contact: nagasure@xilinx.com
+ punnaia@xilinx.com
+Description: This control file allows injecting cache errors on the
+ Cortex-A53 L1 and L2 caches, using the error injection that
+ ARM provides for these caches. Echo 1 to
+ /sys/devices/system/edac/cpu_cache/inject_L1_Error for L1
+ cache error injection, or echo 1 to
+ /sys/devices/system/edac/cpu_cache/inject_L2_Error for L2
+ cache error injection.
diff --git a/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt b/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt
new file mode 100644
index 000000000000..39817e9750c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt
@@ -0,0 +1,15 @@
+Device tree bindings for Zynq's eFuse Controller
+
+The Zynq eFuse controller provides access to the chip eFUSEs, which contain
+information about the device DNA, security settings and device status.
+
+Required properties:
+ compatible: Compatibility string. Must be "xlnx,zynq-efuse".
+ reg: Specify the base and size of the EFUSE controller registers
+ in the memory map. E.g.: reg = <0xf800d000 0x20>;
+
+Example:
+efuse: efuse@f800d000 {
+ compatible = "xlnx,zynq-efuse";
+ reg = <0xf800d000 0x20>;
+};
diff --git a/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt b/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt
new file mode 100644
index 000000000000..b6dbf05b4eb5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt
@@ -0,0 +1,17 @@
+Device tree bindings for Zynq's OCM Controller
+
+The OCM is divided into four 64 KB segments, each of which can be separately
+configured to a low or high address location. The location is controlled via
+the SLCR.
+
+Required properties:
+ compatible: Compatibility string. Must be "xlnx,zynq-ocmc-1.0".
+ reg: Specify the base and size of the OCM controller registers
+ in the memory map. E.g.: reg = <0xf800c000 0x1000>;
+
+Example:
+ocmc: ocmc@f800c000 {
+ compatible = "xlnx,zynq-ocmc-1.0";
+ interrupt-parent = <&intc>;
+ interrupts = <0 3 4>;
+ reg = <0xf800c000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/ata/ahci-ceva.txt b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
index 7561cc4de371..d34f11771d5f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-ceva.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
@@ -38,6 +38,8 @@ Required properties:
Optional properties:
- ceva,broken-gen2: limit to gen1 speed instead of gen2.
+ - dma-coherent: Enable this flag if CCI is enabled in the design.
+ Adding this flag configures the AXI cache control register.
Examples:
ahci@fd0c0000 {
@@ -56,4 +58,5 @@ Examples:
ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>;
ceva,broken-gen2;
+ dma-coherent;
};
diff --git a/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt b/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt
new file mode 100644
index 000000000000..8b52017cf1c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt
@@ -0,0 +1,156 @@
+Binding for IDT 8T49N24x Universal Frequency Translator
+
+The 8T49N241 has one fractional-feedback PLL that can be used as a
+jitter attenuator and frequency translator. It is equipped with one
+integer and three fractional output dividers, allowing the generation
+of up to four different output frequencies, ranging from 8kHz to 1GHz.
+These frequencies are completely independent of each other, the input
+reference frequencies and the crystal reference frequency. The device
+places virtually no constraints on input to output frequency conversion,
+supporting all FEC rates, including the new revision of ITU-T
+Recommendation G.709 (2009), most with 0ppm conversion error.
+The outputs may select among LVPECL, LVDS, HCSL or LVCMOS output levels.
+
+The driver can read a full register map from the DT, and will use that
+register map to initialize the attached part (via I2C) when the system
+boots. Any configuration not supported by the common clock framework
+must be done via the full register map, including optimized settings.
+
+The 8T49N241 accepts up to two differential or single-ended input clocks
+and a fundamental-mode crystal input. The internal PLL can lock to either
+of the input reference clocks or just to the crystal to behave as a
+frequency synthesizer. The PLL can use the second input for redundant
+backup of the primary input reference, but in this case, both input clock
+references must be related in frequency.
+
+All outputs are currently assumed to be LVDS, unless overridden in the
+full register map in the DT.
+
+==I2C device node==
+
+Required properties:
+- compatible: shall be "idt,idt8t49n241"
+- reg: i2c device address, shall be one of 0x7C, 0x6C, 0x7D, 0x6D,
+ 0x7E, 0x6E, 0x7F, 0x6F.
+- #clock-cells: From common clock bindings: Shall be 1.
+
+- clocks: from common clock binding; input clock handle. Required.
+- clock-names: from common clock binding; clock input names, shall be
+ one of "input-clk0", "input-clk1", "input-xtal". Required.
+
+==Mapping between clock specifier and physical pins==
+
+When referencing the provided clock in the DT using phandle and
+clock specifier, the following mapping applies:
+
+8T49N241:
+ 0 -- Q0
+ 1 -- Q1
+ 2 -- Q2
+ 3 -- Q3
+
+==Example==
+
+/* Example1: 25MHz input clock (via CLK0) */
+
+ref25: ref25m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+};
+
+i2c-master-node {
+
+ /* IDT 8T49N241 i2c universal frequency translator */
+ i2c241: clock-generator@6c {
+ compatible = "idt,idt8t49n241";
+ reg = <0x6c>;
+ #clock-cells = <1>;
+
+ /* Connect input-clk0 to 25MHz reference */
+ clocks = <&ref25m>;
+ clock-names = "input-clk0";
+ };
+};
+
+/* Consumer referencing the 8T49N241 pin Q1 */
+consumer {
+ ...
+ clocks = <&i2c241 1>;
+ ...
+};
+
+/* Example2: 40MHz xtal frequency, specify all settings */
+
+ref40: ref40m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+};
+
+i2c-master-node {
+
+ /* IDT 8T49N241 i2c universal frequency translator */
+ i2c241: clock-generator@6c {
+ compatible = "idt,idt8t49n241";
+ reg = <0x6c>;
+ #clock-cells = <1>;
+
+ /* Connect input-xtal to 40MHz reference */
+ clocks = <&ref40m>;
+ clock-names = "input-xtal";
+
+ settings = [
+09 50 00 60 67 C5 6C FF 03 00 30 00 00 01 00 00
+01 07 00 00 07 00 00 77 6D 06 00 00 00 00 00 FF
+FF FF FF 00 3F 00 2A 00 16 33 33 00 01 00 00 D0
+00 00 00 00 00 00 00 00 00 04 00 00 00 02 00 00
+00 00 00 00 00 00 00 17 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 D7 0A 2B 20 00 00 00 0B
+00 00 00 00 00 00 00 00 00 00 27 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+C3 00 08 01 00 00 00 00 00 00 00 00 00 30 00 00
+00 0A 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 85 00 00 9C 01 D4 02 71 07 00 00 00
+00 83 00 10 02 08 8C
+];
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5324.txt b/Documentation/devicetree/bindings/clock/silabs,si5324.txt
new file mode 100644
index 000000000000..642af113aa6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/silabs,si5324.txt
@@ -0,0 +1,78 @@
+Binding for Silicon Labs si5324, si5328 and si5319 programmable
+I2C clock generators.
+
+Reference
+This binding uses the common clock binding[1].
+The si5324 is a programmable, I2C-controlled, low-bandwidth, jitter-attenuating,
+precision clock multiplier with up to 2 output clocks. Its internal structure
+can be found in [2].
+The internal pin structure of si5328 and si5319 can be found in [3].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Si5324 Data Sheet
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5324.pdf
+[3] Si53xx Reference Manual
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/
+ Si53xxReferenceManual.pdf
+
+==I2C device node==
+
+Required properties:
+- compatible: should be one of
+ "silabs,si5324"
+ "silabs,si5319"
+ "silabs,si5328"
+- reg: i2c device address.
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+ handles, clock name should be one of
+ "xtal"
+ "clkin1"
+ "clkin2"
+- #address-cells: shall be set to 1.
+- #size-cells: shall be set to 0.
+
+Optional properties:
+- silabs,pll-source: pair of (number, source) for each PLL. Allows
+ overriding the clock source of a PLL.
+
+==Child nodes==
+
+Each of the clock outputs can be overridden individually by
+using a child node of the I2C device node. If a child node for a clock
+output is not set, the EEPROM configuration is not overwritten.
+
+Required child node properties:
+- reg: number of clock output.
+- clock-frequency: default output frequency at power on
+
+Optional child node properties:
+- silabs,drive-strength: output drive strength in mA, shall be one of {2,4,6,8}.
+
+Example:
+The following example describes the ZCU102 board with an HDMI design that
+uses the si5319 as clock generator. The XTAL input is hard-wired on the board
+to act as the input clock, with a frequency of 114.285 MHz.
+
+refhdmi: refhdmi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <114285000>;
+};
+
+/* Si5319 i2c clock generator */
+si5319: clock-generator@68 {
+ status = "okay";
+ compatible = "silabs,si5319";
+ reg = <0x68>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&refhdmi>;
+ clock-names = "xtal";
+
+ clk0 {
+ reg = <0>;
+ clock-frequency = <27000000>;
+ };
+};
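+
+A consumer then references the generated clock by phandle and output
+number, for example (the consumer node below is only an illustrative
+placeholder):
+
+consumer {
+ ...
+ clocks = <&si5319 0>;
+ ...
+};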
diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt
new file mode 100644
index 000000000000..94a27f65bcac
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.txt
@@ -0,0 +1,48 @@
+--------------------------------------------------------------------------
+Device Tree Clock bindings for the Xilinx Versal
+--------------------------------------------------------------------------
+The clock controller is a hardware block of the Xilinx Versal clock tree. It
+reads the required input clock frequencies from the devicetree and acts as the
+clock provider for all clock consumers of PS clocks.
+
+See clock_bindings.txt for more information on the generic clock bindings.
+
+Required properties:
+ - #clock-cells: Must be 1
+ - compatible: Must contain: "xlnx,versal-clk"
+ - clocks: List of clock specifiers which are external input
+ clocks to the given clock controller. Please refer
+ the next section to find the input clocks for a
+ given controller.
+ - clock-names: List of clock names which are external input clocks
+ to the given clock controller. Please refer to the
+ clock bindings for more details.
+
+Input clocks for Xilinx Versal clock controller:
+
+The Xilinx Versal has one primary and two alternative reference clock inputs.
+These required clock inputs are:
+ - ref_clk
+ - alt_ref_clk
+ - pl_alt_ref_clk
+
+Output clocks are registered based on clock information received
+from firmware. Output clock indexes are listed in
+include/dt-bindings/clock/xlnx-versal-clk.h.
+
+-------
+Example
+-------
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ versal_clk: clock-controller {
+ #clock-cells = <1>;
+ compatible = "xlnx,versal-clk";
+ clocks = <&ref_clk>, <&alt_ref_clk>, <&pl_alt_ref_clk>;
+ clock-names = "ref_clk", "alt_ref_clk", "pl_alt_ref_clk";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
new file mode 100644
index 000000000000..226bfb9261d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP AES hw acceleration support
+
+The ZynqMP PS-AES hw accelerator is used to encrypt/decrypt
+the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-aes"
+
+Example:
+ zynqmp_aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt b/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
new file mode 100644
index 000000000000..6b4c0e0446fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP RSA hw acceleration support
+
+The ZynqMP PS-RSA hw accelerator is used to encrypt/decrypt
+the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-rsa"
+
+Example:
+ xlnx_rsa: zynqmp_rsa {
+ compatible = "xlnx,zynqmp-rsa";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt b/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
new file mode 100644
index 000000000000..c7be6e2ce246
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP SHA3(keccak-384) hw acceleration support.
+
+The ZynqMP PS-SHA hw accelerator is used to calculate the
+SHA3(keccak-384) hash value on the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-keccak-384"
+
+Example:
+ xlnx_keccak_384: sha384 {
+ compatible = "xlnx,zynqmp-keccak-384";
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/bridge.txt b/Documentation/devicetree/bindings/display/xlnx/bridge.txt
new file mode 100644
index 000000000000..c5f7c0a1dea0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/bridge.txt
@@ -0,0 +1,29 @@
+Xilinx DRM bridge
+-----------------
+
+The Xilinx DRM provides an interface layer called Xilinx bridge to bridge
+multiple components with a series of functions. It models a simple
+unidirectional communication: single client -> single bridge. The client
+is not limited to DRM compatible drivers and can be any subsystem driver,
+but the client driver should call the bridge functions explicitly.
+
+Provider
+--------
+
+The bridge provider should assign a corresponding of_node to struct xlnx_bridge.
+For example, if its own node is used,
+
+ provider_node: provider_node {
+ };
+
+ bridge.of_node = provider_device->of_node;
+
+Client
+------
+
+The bridge client should have a phandle to the bridge device node. The bridge
+device node should be passed to get a bridge instance,
+
+ client_node {
+ xlnx,bridge = <&provider_node>;
+ };
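+
+For example, a client driver might look up and drive the bridge as follows
+(a minimal sketch; the helpers are declared in
+drivers/gpu/drm/xlnx/xlnx_bridge.h, and the resolution, bus format and
+client_dev values here are illustrative only):
+
+	struct device_node *np;
+	struct xlnx_bridge *bridge;
+
+	/* Resolve the xlnx,bridge phandle to the provider's of_node */
+	np = of_parse_phandle(client_dev->of_node, "xlnx,bridge", 0);
+	bridge = of_xlnx_bridge_get(np);
+
+	/* Configure the client -> bridge link, then enable it */
+	xlnx_bridge_set_input(bridge, 1920, 1080, MEDIA_BUS_FMT_RGB888_1X24);
+	xlnx_bridge_enable(bridge);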
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt
new file mode 100644
index 000000000000..55508167e606
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt
@@ -0,0 +1,73 @@
+Device-Tree bindings for Xilinx MIPI DSI Tx IP core
+
+The IP core supports transmission of video data in MIPI DSI protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,dsi".
+
+ - reg: Base address and size of the IP core.
+
+ - xlnx,dsi-data-type: Color format. The value should be one of "MIPI_DSI_FMT_RGB888",
+ "MIPI_DSI_FMT_RGB666", "MIPI_DSI_FMT_RGB666_PACKED" or "MIPI_DSI_FMT_RGB565".
+
+ - simple_panel: The subnode for connected panel. This represents the
+ DSI peripheral connected to the DSI host node. Please refer to
+ Documentation/devicetree/bindings/display/mipi-dsi-bus.txt. The
+ simple-panel driver has auo,b101uan01 panel timing parameters added along
+ with other existing panels. The DSI driver derives the required Tx IP
+ controller timing values from the panel timing parameters.
+
+ - port: A logical block that can be used / connected independently with an
+ external device. The topology of the entire pipeline should be described
+ in the display controller port nodes using the DT bindings defined in
+ Documentation/devicetree/bindings/graph.txt.
+
+ - xlnx,dsi-num-lanes: Number of DSI lanes for the Tx controller.
+ The value should be 1, 2, 3 or 4. Based on xlnx,dsi-num-lanes and the
+ line rate of the MIPI D-PHY core in Mbps, the Xilinx MIPI DSI Tx IP core
+ adds markers to the received AXI4-Stream as per the DSI protocol, and the
+ packet thus framed is converted to serial data by the MIPI D-PHY core.
+ Please refer to Xilinx PG238 for more details. This value should be equal
+ to the number of lanes supported by the connected DSI panel; the panel has
+ to support this value or has to be programmed to the same value that the
+ DSI Tx controller is configured to.
+
+ - clocks: List of phandles to the video and 200 MHz DPHY clocks.
+
+ - clock-names: Must contain "s_axis_aclk" and "dphy_clk_200M" in the same
+ order as the clocks listed in the clocks property.
+
+Required simple_panel properties:
+ - compatible: Value should be one of the panel compatible strings documented
+ in Documentation/devicetree/bindings/display/panel/, e.g. "auo,b101uan01".
+
+Optional properties:
+ - xlnx,vpss: VPSS phandle.
+ This handle is required only when a VPSS is connected to the DSI as a bridge.
+
+Example:
+
+#include <dt-bindings/drm/mipi-dsi.h>
+ mipi_dsi_tx_subsystem@80000000 {
+ compatible = "xlnx,dsi";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dsi-num-lanes = <4>;
+ xlnx,dsi-data-type = <MIPI_DSI_FMT_RGB888>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,vpss = <&v_proc_ss_0>;
+ clock-names = "dphy_clk_200M", "s_axis_aclk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ encoder_dsi_port: port@0 {
+ reg = <0>;
+ dsi_encoder: endpoint {
+ remote-endpoint = <&xyz_port>;
+ };
+ };
+ simple_panel: simple-panel@0 {
+ compatible = "auo,b101uan01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt
new file mode 100644
index 000000000000..20f4cec27175
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt
@@ -0,0 +1,163 @@
+Device-Tree bindings for Xilinx Video Mixer IP core
+
+The IP core provides a flexible video processing block for alpha blending
+and compositing multiple video and/or graphics layers.
+It supports up to sixteen layers, depending on the IP version, with an
+optional logo layer, using a combination of video inputs from either frame
+buffers or streaming video cores (through AXI4-Stream interfaces).
+The Video Mixer always has one streaming input layer, known as the master
+layer.
+
+Required properties:
+ - compatible: Must contain at least one of
+ "xlnx,mixer-4.0" (MIXER 4.0 version)
+ "xlnx,mixer-3.0" (MIXER 3.0 version)
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reset-gpios: GPIO to reset the mixer IP.
+ - xlnx,dma-addr-width: DMA address width; valid values are 32 and 64.
+ - xlnx,bpc: bits per component for the mixer.
+ - xlnx,ppc: pixels per clock for the mixer.
+ - xlnx,num-layers: Total number of layers (excluding logo)
+   The value ranges from 1 to 9 for compatible string "xlnx,mixer-3.0" and
+   from 1 to 17 for compatible string "xlnx,mixer-4.0".
+ - layer_[x]: node for [x] layer
+ - xlnx,layer-id: layer identifier number
+ - xlnx,vformat: video format for layer. See list of supported formats below.
+ - xlnx,layer-max-width: maximum layer width; mandatory for the master layer.
+   For overlay layers it is mandatory only if scaling is allowed; otherwise
+   it is not required.
+ - xlnx,layer-max-height: maximum layer height; mandatory for the master
+   layer. Not required for overlay layers.
+ - xlnx,layer-primary: denotes the primary layer; should be present in the
+   node of the layer that is expected to construct the primary plane.
+
+Optional properties:
+ - dmas: DMA channel attached to a layer; mandatory for the master layer,
+   optional for all other layers.
+ - dma-names: Should be "dma0". For more details on the DMA identifier
+   string, refer to Documentation/devicetree/bindings/dma/dma.txt.
+ - xlnx,layer-streaming: denotes that the layer can be streaming;
+   mandatory for the master layer. Streaming layers need an external DMA,
+   whereas non-streaming layers read directly from memory.
+ - xlnx,layer-alpha: denotes that the layer can do alpha compositing.
+ - xlnx,layer-scale: denotes that the layer can be scaled to 2x and 4x.
+ - xlnx,logo-layer: denotes that the logo layer is enabled.
+ - logo: logo layer
+ - xlnx,bridge: phandle to bridge node.
+   This handle is required only when a VTC is connected as a bridge.
+
+Supported Formats:
+ Mixer IP Format Driver supported Format String
+ BGR888 "RG24"
+ RGB888 "BG24"
+ XBGR2101010 "XB30"
+ XRGB8888 "XR24"
+ RGBA8888 "RA24"
+ ABGR8888 "AB24"
+ ARGB8888 "AR24"
+ XBGR8888 "XB24"
+ YUYV "YUYV"
+ UYVY "UYVY"
+ AYUV "AYUV"
+ NV12 "NV12"
+ NV16 "NV16"
+ Y8 "GREY"
+ Y10 "Y10 " (Note: Space included)
+ XVUY2101010 "XV30"
+ VUY888 "VU24"
+ XVUY8888 "XV24"
+ XV15 "XV15"
+ XV20 "XV20"
+Note: Format strings are case-sensitive.
+
+Example:
+ v_mix_0: v_mix@80100000 {
+ compatible = "xlnx,mixer-3.0";
+ interrupt-parent = <&gic>;
+ interrupts = <0 93 4>;
+ reg = <0x0 0x80100000 0x0 0x80000>;
+
+		xlnx,dma-addr-width = <32>;
+ reset-gpios = <&gpio 1 1>;
+
+ xlnx,bpc = <8>;
+ xlnx,ppc = <2>;
+ xlnx,num-layers = <8>;
+ xlnx,logo-layer;
+ xlnx,bridge = <&v_tc_0>;
+
+ mixer_port: mixer_port@0 {
+ reg = <0>;
+ mixer_crtc: endpoint {
+ remote-endpoint = <&sdi_encoder>;
+ };
+ };
+ xv_mix_master: layer_0 {
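+			/* Master layer: streaming input, requires an external DMA */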
+ xlnx,layer-id = <0>;
+ xlnx,vformat = "YUYV";
+ xlnx,layer-max-width = <4096>;
+			xlnx,layer-max-height = <2160>;
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "dma0";
+ xlnx,layer-streaming;
+ xlnx,layer-primary;
+ };
+ xv_mix_overlay_1: layer_1 {
+ xlnx,layer-id = <1>;
+ xlnx,vformat = "NV16";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_overlay_2: layer_2 {
+ xlnx,layer-id = <2>;
+ xlnx,vformat = "YUYV";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_overlay_3: layer_3 {
+ xlnx,layer-id = <3>;
+ xlnx,vformat = "AYUV";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_overlay_4: layer_4 {
+ xlnx,layer-id = <4>;
+ xlnx,vformat = "GREY";
+ dmas = <&scaler_v_frmbuf_rd_0 0>;
+ dma-names = "dma0";
+ xlnx,layer-streaming;
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_overlay_5: layer_5 {
+ xlnx,layer-id = <5>;
+ xlnx,vformat = "AB24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_overlay_6: layer_6 {
+ xlnx,layer-id = <6>;
+ xlnx,vformat = "XB24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_overlay_7: layer_7 {
+ xlnx,layer-id = <7>;
+ xlnx,vformat = "BG24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+			xlnx,layer-max-width = <1920>;
+ };
+ xv_mix_logo: logo {
+ xlnx,layer-id = <8>;
+ xlnx,logo-height = <64>;
+ xlnx,logo-width = <64>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt
new file mode 100644
index 000000000000..c6034bffc64a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt
@@ -0,0 +1,41 @@
+Xilinx PL Display driver
+------------------------
+
+PL Display is a logical device that provides completeness to the Xilinx
+display pipeline. It is a software driver providing the DRM CRTC and plane
+components for various IPs used in Xilinx display pipelines.
+
+A linear pipeline with multiple blocks:
+DMA --> PL_Display --> SDI
+
+Required properties:
+
+- compatible: Must be "xlnx,pl-disp"
+- dmas: DMA channel attached to the pipeline
+- dma-names: name for the DMA channel
+- xlnx,vformat: video format for the layer
+- port: The logical block can be used / connected independently with an
+  external device. In the display controller port nodes, the topology
+  of the entire pipeline should be described using the DT bindings defined
+  in Documentation/devicetree/bindings/graph.txt.
+- reg: Base address and size of device
+
+Optional properties:
+ - xlnx,bridge: bridge phandle
+   This handle is required only when a VTC is connected as a bridge.
+
+Example:
+
+ drm-pl-disp-drv {
+ compatible = "xlnx,pl-disp";
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "dma0";
+ xlnx,vformat = "YUYV";
+ xlnx,bridge = <&v_tc_0>;
+ pl_disp_port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&sdi_port>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt
new file mode 100644
index 000000000000..971ac5304761
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt
@@ -0,0 +1,54 @@
+Device-Tree bindings for Xilinx SDI Tx subsystem
+
+The IP core supports transmission of video data using the SDI Tx protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,sdi-tx".
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reg: Base address and size of the IP core.
+ - port: The logical block can be used / connected independently with an
+   external device. In the display controller port nodes, the topology
+   of the entire pipeline should be described using the DT bindings defined
+   in Documentation/devicetree/bindings/graph.txt.
+   At least one port is required; at most, two ports are present.
+   The reg index for the AXI4-Stream port is 0 and for ancillary data it is 1.
+ - clocks: List of phandles to the AXI Lite, video and SDI Tx clocks.
+ - clock-names: Must contain "s_axi_aclk", "video_in_clk" and "sdi_tx_clk"
+   in the same order as the clocks listed in the clocks property.
+
+Optional properties:
+ - xlnx,vpss: VPSS phandle.
+   This handle is required only when a VPSS is connected to the SDI as a bridge.
+ - xlnx,tx-insert-c-str-st352: Insert ST352 payload in Chroma stream.
+
+Example:
+
+ sdi_tx_subsystem@80000000 {
+ compatible = "xlnx,sdi-tx";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,vpss = <&v_proc_ss_0>;
+ clock-names = "s_axi_aclk", "video_in_clk", "sdi_tx_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ encoder_sdi_port: port@0 {
+ reg = <0>;
+ sdi_encoder: endpoint {
+ remote-endpoint = <&pl_disp_crtc>;
+ };
+ };
+
+ sdi_audio_port: port@1 {
+ reg = <1>;
+ sdi_audio_sink_port: endpoint {
+ remote-endpoint = <&sditx_audio_embed_src_port>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt
new file mode 100644
index 000000000000..cf80d185d429
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt
@@ -0,0 +1,35 @@
+Xilinx VPSS Color Space Converter
+---------------------------------
+The Xilinx VPSS Color Space Converter is a Video IP that supports
+color space conversion from RGB to YUV 444/422/420 and vice versa.
+
+Required properties:
+
+- compatible: Must be "xlnx,vpss-csc".
+
+- reg: Physical base address and length of registers set for the device.
+
+- xlnx,video-width: This property qualifies the video format with sample
+ width expressed as a number of bits per pixel component. Supported video
+ width values are 8/10/12/16.
+
+- reset-gpios: GPIO specifier to assert/de-assert the reset line.
+
+- clocks: phandle to IP clock.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- xlnx,max-height: Maximum number of lines in a frame.
+ Valid range from 64 to 4320.
+
+Example:
+ csc@a0040000 {
+ compatible = "xlnx,vpss-csc";
+ reg = <0x0 0xa0040000 0x0 0x10000>;
+ reset-gpios = <&gpio 0x0 GPIO_ACTIVE_LOW>;
+ xlnx,video-width = <8>;
+ clocks = <&misc_clk_0>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+	};
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt
new file mode 100644
index 000000000000..8920b81e2779
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt
@@ -0,0 +1,51 @@
+Xilinx VPSS Scaler
+------------------
+The Xilinx VPSS Scaler is a Video IP that supports up-scaling,
+down-scaling and no-scaling functionality. It supports custom
+resolution values from 0 to 4096.
+
+Required properties:
+
+- compatible: Must be "xlnx,vpss-scaler".
+
+- reg: Physical base address and length of registers set for the device.
+
+- xlnx,num-hori-taps: The number of horizontal taps for the scaling filter.
+  Supported tap values are 2/4/6/8/10/12.
+
+- xlnx,num-vert-taps: The number of vertical taps for the scaling filter.
+  Supported tap values are 2/4/6/8/10/12.
+
+ A value of 2 represents bilinear filters. A value of 4 represents bicubic.
+ Values 6, 8, 10, 12 represent polyphase filters.
+
+- xlnx,pix-per-clk : The pixels per clock property of the IP.
+  Supported values are 1 and 2.
+
+- reset-gpios: GPIO specifier to assert/de-assert the reset line.
+
+- clocks: List of phandles to the AXI Lite and video clocks.
+
+- clock-names: Must contain "aclk_ctrl" and "aclk_axis" in the same order as
+  the clocks listed in the clocks property.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- xlnx,max-height: Maximum number of lines in a frame.
+ Valid range from 64 to 4320.
+
+Example:
+	scaler@a0000000 {
+ compatible = "xlnx,vpss-scaler";
+ reg = <0x0 0xa0000000 0x0 0x40000>;
+ reset-gpios = <&gpio 0x0 GPIO_ACTIVE_LOW>;
+ xlnx,num-hori-taps = <8>;
+ xlnx,num-vert-taps = <8>;
+ xlnx,pix-per-clk = <2>;
+ clock-names = "aclk_ctrl", "aclk_axis";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+	};
+
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt
new file mode 100644
index 000000000000..6a4d5bcc5e59
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt
@@ -0,0 +1,32 @@
+Device-Tree bindings for Xilinx Video Timing Controller (VTC)
+
+Xilinx VTC is a general purpose video timing generator and detector.
+The input side of this core automatically detects horizontal and
+vertical synchronization pulses, polarity, blanking timing and active
+pixels. On the output side, it generates the horizontal and vertical
+blanking and synchronization pulses used with a standard video system,
+including support for programmable pulse polarity.
+
+The core is commonly used with the Video In to AXI4-Stream core to detect
+the format and timing of incoming video data, or with the AXI4-Stream to
+Video Out core to generate outgoing video timing for downstream sinks such
+as a video monitor.
+
+For details please refer to
+https://www.xilinx.com/support/documentation/ip_documentation/v_tc/v6_1/pg016_v_tc.pdf
+
+Required properties:
+ - compatible: value should be "xlnx,bridge-v-tc-6.1"
+ - reg: base address and size of the VTC IP
+ - xlnx,pixels-per-clock: Pixels per clock of the stream. Can be 1, 2 or 4.
+ - clocks: List of phandles for the AXI Lite and video clocks
+ - clock-names: Must contain "s_axi_aclk" and "clk" in the same order as the
+   clocks listed in the clocks property.
+
+Example:
+ v_tc_0: v_tc@80030000 {
+ compatible = "xlnx,bridge-v-tc-6.1";
+ reg = <0x0 0x80030000 0x0 0x10000>;
+ xlnx,pixels-per-clock = <2>;
+ clock-names = "s_axi_aclk", "clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt
new file mode 100644
index 000000000000..46d0c7671ee5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt
@@ -0,0 +1,82 @@
+Xilinx ZynqMP DisplayPort subsystem
+-----------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,zynqmp-dpsub-1.7".
+
+- reg: Physical base address and length of the registers set for the device.
+- reg-names: Must be "dp", "blend", "av_buf", and "aud" to map logical register
+ partitions.
+
+- interrupts: Interrupt number.
+- interrupt-parent: phandle for the interrupt controller.
+
+- clocks: phandles for the AXI, audio, non-live video, and live video clocks.
+  The AXI clock is required. The audio clock is optional; if not present,
+  audio will be disabled. One of the non-live or live video clocks should
+  be present.
+- clock-names: The identification strings are required: "dp_apb_clk" for the
+  AXI clock (as in the example below), "dp_aud_clk" for the audio clock,
+  "dp_vtc_pixel_clk_in" for the non-live video clock, and
+  "dp_live_video_in_clk" for the live video clock (clock from the
+  programmable logic).
+
+- phys: phandles for the PHY specifiers. The number of lanes is configurable
+  between 1 and 2, so the number of phandles should be 1 or 2 accordingly.
+- phy-names: The identifier strings, "dp-phy" followed by an index, 0 or 1.
+  For a single lane, only "dp-phy0" is required. For dual lanes, both
+  "dp-phy0" and "dp-phy1" are required, where "dp-phy0" is the primary lane.
+
+- power-domains: phandle for the corresponding power domain.
+
+- vid-layer, gfx-layer: Required to represent the available layers.
+
+Required layer properties
+
+- dmas: phandles for DMA channels as defined in
+ Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: The identifier strings are required: "gfx0" for the graphics
+  layer DMA channel, and "vid" followed by an index (0 - 2) for the video
+  layer DMA channels.
+
+Optional child node
+
+- The driver populates any child device node in this node. This can be used,
+ for example, to populate the sound device from the DisplayPort subsystem
+ driver.
+
+Example:
+ zynqmp-display-subsystem@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+
+ clock-names = "dp_apb_clk", "dp_aud_clk", "dp_live_video_in_clk";
+ clocks = <&dp_aclk>, <&clkc 17>, <&si570_1>;
+
+ phys = <&lane1>, <&lane0>;
+ phy-names = "dp-phy0", "dp-phy1";
+
+ power-domains = <&pd_dp>;
+
+ vid-layer {
+ dma-names = "vid0", "vid1", "vid2";
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ };
+
+ gfx-layer {
+ dma-names = "gfx0";
+ dmas = <&xlnx_dpdma 3>;
+ };
+	};
diff --git a/Documentation/devicetree/bindings/dma/xilinx/axi-cdma.txt b/Documentation/devicetree/bindings/dma/xilinx/axi-cdma.txt
new file mode 100644
index 000000000000..6e5c78c5709c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/axi-cdma.txt
@@ -0,0 +1,31 @@
+* Xilinx AXI CDMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-cdma-test-1.00.a"
+- dmas: a list of <[CDMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of CDMA channel names, one per "dmas" entry
+
+Example:
+++++++++
+
+cdmatest_0: cdmatest@0 {
+	compatible = "xlnx,axi-cdma-test-1.00.a";
+ dmas = <&axi_cdma_0 0>;
+ dma-names = "cdma";
+};
+
+Xilinx AXI CDMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+
+axi_cdma_0: axicdma@7e200000 {
+ compatible = "xlnx,axi-cdma-1.00.a";
+ #dma-cells = <1>;
+	reg = <0x7e200000 0x10000>;
+	dma-channel@7e200000 {
+		compatible = "xlnx,axi-cdma-channel";
+		interrupts = <0 55 4>;
+		xlnx,datawidth = <0x40>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt
new file mode 100644
index 000000000000..f4f5b018dfa5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt
@@ -0,0 +1,38 @@
+* Xilinx AXI DMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-dma-test-1.00.a"
+- dmas: a list of <[DMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+
+Example:
+++++++++
+
+dmatest_0: dmatest@0 {
+	compatible = "xlnx,axi-dma-test-1.00.a";
+ dmas = <&axi_dma_0 0
+ &axi_dma_0 1>;
+ dma-names = "axidma0", "axidma1";
+};
+
+
+Xilinx AXI DMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+
+axi_dma_0: axidma@40400000 {
+ compatible = "xlnx,axi-dma-1.00.a";
+ #dma-cells = <1>;
+	reg = <0x40400000 0x10000>;
+	dma-channel@40400000 {
+		compatible = "xlnx,axi-dma-mm2s-channel";
+		interrupts = <0 59 4>;
+		xlnx,datawidth = <0x40>;
+	};
+	dma-channel@40400030 {
+		compatible = "xlnx,axi-dma-s2mm-channel";
+		interrupts = <0 58 4>;
+		xlnx,datawidth = <0x40>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
new file mode 100644
index 000000000000..acdcc445f01b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
@@ -0,0 +1,67 @@
+* Xilinx PS PCIe Root DMA
+
+Required properties:
+- compatible: Should be "xlnx,ps_pcie_dma-1.00.a"
+- reg: Register offset for Root DMA channels
+- reg-names: Name for the register. Should be "ps_pcie_regbase"
+- interrupts: Interrupt pin for Root DMA
+- interrupt-names: Name for the interrupt. Should be "ps_pcie_rootdma_intr"
+- interrupt-parent: Should be gic in case of zynqmp
+- rootdma: Indicates that this platform device is the Root DMA.
+  This is required because the same platform driver is also invoked by PCIe
+  endpoints.
+- dma_vendorid: 16-bit PCIe device vendor ID.
+  This can later be used by a DMA client for matching while using
+  dma_request_channel.
+- dma_deviceid: 16-bit PCIe device ID.
+  This can later be used by a DMA client for matching while using
+  dma_request_channel.
+- numchannels: Indicates the number of channels to be enabled for the device.
+  Valid values are from 1 to 4 for ZynqMP.
+- ps_pcie_channel : One entry for each channel to be enabled.
+  This array contains channel-specific properties.
+  Index 0: Direction of channel
+  The direction of a channel can be either PCIe memory to AXI memory, i.e.,
+  Host to Card, or AXI memory to PCIe memory, i.e., Card to Host.
+  The PCIe to AXI channel direction is represented as 0x1.
+  The AXI to PCIe channel direction is represented as 0x0.
+  Index 1: Number of Buffer Descriptors
+  This number describes the number of buffer descriptors to be allocated for
+  a channel.
+  Index 2: Number of Queues
+  Each channel has four DMA buffer descriptor queues.
+  By default, all four queues are managed by the Root DMA driver.
+  The user may choose to have only two queues, either the Source and its
+  Status Queue or the Destination and its Status Queue, handled by the
+  driver. The other two queues then need to be handled by user logic, which
+  is not part of this driver.
+  All queues on the host are represented by 0x4.
+  Two queues on the host are represented by 0x2.
+  Index 3: Coalesce Count
+  This number indicates the number of transfers after which an interrupt is
+  raised for the particular channel. The allowed range is from 0 to 255.
+  Index 4: Coalesce Count Timer frequency
+  This property controls the frequency of the poll timer. A poll timer is
+  created for a channel whenever a coalesce count value (>= 1) is programmed
+  for that channel. This timer helps drain out completed transactions even
+  when an interrupt is not generated.
+
+Client Usage:
+  DMA clients can request these channels using the dma_request_channel API,
+  as sketched below.
+
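+A hedged C sketch of such a client follows; the filter below matches on the
+DMA device only, though a real client could also match on the IDs given by
+dma_vendorid/dma_deviceid. All names here are illustrative:
+
+	#include <linux/dmaengine.h>
+
+	static bool ps_pcie_filter(struct dma_chan *chan, void *param)
+	{
+		/* Accept only channels provided by the expected DMA device. */
+		return chan->device->dev == (struct device *)param;
+	}
+
+	static struct dma_chan *get_rootdma_chan(struct device *dma_dev)
+	{
+		dma_cap_mask_t mask;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		/* Returns NULL when no matching channel is available. */
+		return dma_request_channel(mask, ps_pcie_filter, dma_dev);
+	}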
+
+Xilinx PS PCIe Root DMA node Example
+++++++++++++++++++++++++++++++++++++
+
+ pci_rootdma: rootdma@fd0f0000 {
+ compatible = "xlnx,ps_pcie_dma-1.00.a";
+ reg = <0x0 0xfd0f0000 0x0 0x1000>;
+ reg-names = "ps_pcie_regbase";
+ interrupts = <0 117 4>;
+ interrupt-names = "ps_pcie_rootdma_intr";
+ interrupt-parent = <&gic>;
+ rootdma;
+ dma_vendorid = /bits/ 16 <0x10EE>;
+ dma_deviceid = /bits/ 16 <0xD021>;
+ numchannels = <0x4>;
+ #size-cells = <0x5>;
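+		/* Per channel: <direction num-buffer-descriptors num-queues
+		 * coalesce-count poll-timer-frequency>, per the indices above. */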
+ ps_pcie_channel0 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel1 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel2 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel3 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt b/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt
new file mode 100644
index 000000000000..5821fdc3e5e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt
@@ -0,0 +1,39 @@
+* Xilinx Video DMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-vdma-test-1.00.a"
+- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+- xlnx,num-fstores: Should be the number of framebuffers as configured in
+ VDMA device node.
+
+Example:
+++++++++
+
+vdmatest_0: vdmatest@0 {
+	compatible = "xlnx,axi-vdma-test-1.00.a";
+ dmas = <&axi_vdma_0 0
+ &axi_vdma_0 1>;
+ dma-names = "vdma0", "vdma1";
+ xlnx,num-fstores = <0x8>;
+};
+
+
+Xilinx Video DMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+axi_vdma_0: axivdma@44A40000 {
+ compatible = "xlnx,axi-vdma-1.00.a";
+ ...
+ dma-channel@44A40000 {
+ ...
+ xlnx,num-fstores = <0x8>;
+ ...
+	};
+ dma-channel@44A40030 {
+ ...
+ xlnx,num-fstores = <0x8>;
+ ...
+	};
+};
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 93b6d961dd4f..613d775658a2 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -15,7 +15,7 @@ Required properties:
- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
"xlnx,axi-cdma-1.00.a""
- #dma-cells: Should be <1>, see "dmas" property below
-- reg: Should contain VDMA registers location and length.
+- reg: Should contain DMA registers location and length.
- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
- dma-ranges: Should be as the following <dma_addr cpu_addr max_len>.
- dma-channel child node: Should have at least one channel and can have up to
@@ -38,11 +38,10 @@ Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
Optional properties for AXI DMA:
-- xlnx,sg-length-width: Should be set to the width in bits of the length
- register as configured in h/w. Takes values {8...26}. If the property
- is missing or invalid then the default value 23 is used. This is the
- maximum value that is supported by all IP versions.
-- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
+- xlnx,sg-length-width: Should be set to the width of the buffer length
+  register as configured in hardware. If this property is missing, or the
+  width is invalid, i.e. not in the range 8-26, the maximum buffer length
+  width common to all IP versions, 23 bits, is used.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
It takes following values:
@@ -69,15 +68,13 @@ Optional child node properties for VDMA:
enabled/disabled in hardware.
- xlnx,enable-vert-flip: Tells vertical flip is
enabled/disabled in hardware(S2MM path).
-Optional child node properties for AXI DMA:
--dma-channels: Number of dma channels in child node.
Example:
++++++++
axi_vdma_0: axivdma@40030000 {
compatible = "xlnx,axi-vdma-1.00.a";
- #dma_cells = <1>;
+ #dma-cells = <1>;
reg = < 0x40030000 0x10000 >;
dma-ranges = <0x00000000 0x00000000 0x40000000>;
xlnx,num-fstores = <0x8>;
@@ -104,7 +101,8 @@ axi_vdma_0: axivdma@40030000 {
Required properties:
- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
where Channel ID is '0' for write/tx and '1' for read/rx
- channel.
+	  channel if both channels are enabled.
+	  If only one channel, either tx or rx, is enabled, the Channel ID is '0'.
- dma-names: a list of DMA channel names, one per "dmas" entry
Example:
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
new file mode 100644
index 000000000000..5f1e680ffcc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
@@ -0,0 +1,91 @@
+Device-Tree bindings for the Xilinx ZynqMP DisplayPort DMA engine (DPDMA)
+
+The ZynqMP DPDMA engine feeds display and audio frames to the ZynqMP
+DisplayPort subsystem, which handles blending and audio mixing and
+transmits the output to the DisplayPort IP core.
+
+Required properties:
+ - compatible: Should be "xlnx,dpdma".
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - clocks: phandle for the AXI clock.
+ - clock-names: The identification string "axi_clk" is always required.
+
+Required child node properties:
+- compatible: Should be one of "xlnx,video0", "xlnx,video1", "xlnx,video2",
+ "xlnx,graphics", "xlnx,audio0", or "xlnx,audio1".
+
+Example:
+
+ xlnx_dpdma: axidpdma@43c10000 {
+ compatible = "xlnx,dpdma";
+ reg = <0x43c10000 0x1000>;
+ interrupts = <0 54 4>;
+ interrupt-parent = <&intc>;
+ clocks = <&clkc 16>;
+ clock-names = "axi_clk";
+ xlnx,axi-clock-freq = <200000000>;
+
+ dma-channels = <6>;
+
+ #dma-cells = <1>;
+ dma-video0channel@43c10000 {
+ compatible = "xlnx,video0";
+ };
+ dma-video1channel@43c10000 {
+ compatible = "xlnx,video1";
+ };
+ dma-video2channel@43c10000 {
+ compatible = "xlnx,video2";
+ };
+ dma-graphicschannel@43c10000 {
+ compatible = "xlnx,graphics";
+ };
+ dma-audio0channel@43c10000 {
+ compatible = "xlnx,audio0";
+ };
+ dma-audio1channel@43c10000 {
+ compatible = "xlnx,audio1";
+ };
+ };
+
+* DMA client
+
+Required properties:
+- dmas: a list of <[DPDMA device phandle] [Channel ID]> pairs. "Channel ID"
+ is defined as video0 = 0, video1 = 1, video2 = 2, graphics = 3, audio0 = 4,
+ and audio1 = 5.
+
+Example:
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,encoder-slave = <&xlnx_dp>;
+ clocks = <&si570 0>;
+ xlnx,connector-type = "DisplayPort";
+ xlnx,dp-sub = <&xlnx_dp_sub>;
+ planes {
+ xlnx,pixel-format = "rgb565";
+ plane0 {
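+			/* Channel ID 3 = graphics, per the mapping above */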
+ dmas = <&xlnx_dpdma 3>;
+ dma-names = "dma";
+ };
+ plane1 {
+ dmas = <&xlnx_dpdma 0>;
+ dma-names = "dma";
+ };
+ };
+ };
+
+ xlnx_dp_snd_pcm0: dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
+
+ xlnx_dp_snd_pcm1: dp_snd_pcm1 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 5>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
new file mode 100644
index 000000000000..39cb6ff762c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
@@ -0,0 +1,123 @@
+The Xilinx framebuffer DMA engine supports two soft IP blocks: one IP
+block is used for reading video frame data from memory (FB Read) to the device
+and the other IP block is used for writing video frame data from the device
+to memory (FB Write). Both the FB Read/Write IP blocks are aware of the
+format of the data being written to or read from memory including RGB and
+YUV in packed, planar, and semi-planar formats. Because the FB Read/Write
+blocks are format aware, only one buffer pointer is needed by the IP blocks
+even when planar or semi-planar formats are used. A hedged driver-side
+usage sketch is provided after the device tree examples below.
+
+FB Read Required properties:
+- compatible : Should be "xlnx,axi-frmbuf-rd-v2.1". Older string
+ "xlnx,axi-frmbuf-rd-v2" is now deprecated.
+
+Note: Compatible string "xlnx,axi-frmbuf-rd" and the hardware it
+represented is no longer supported.
+
+FB Write Required properties:
+- compatible : Should be "xlnx,axi-frmbuf-wr-v2.1". Older string
+ "xlnx,axi-frmbuf-wr-v2" is now deprecated.
+
+Note: Compatible string "xlnx,axi-frmbuf-wr" and the hardware it
+represented is no longer supported.
+
+Required Properties Common to both FB Read and FB Write:
+- #dma-cells : should be 1
+- interrupt-parent : Interrupt controller the interrupt is routed through
+- interrupts : Should contain DMA channel interrupt
+- reset-gpios : Should contain GPIO reset phandle
+- reg : Memory map for module access
+- xlnx,dma-addr-width : Size of dma address pointer in IP (either 32 or 64)
+- xlnx,vid-formats : A list of strings indicating what video memory
+ formats the IP has been configured to support.
+ See VIDEO FORMATS table below and examples.
+
+Required Properties Common to both FB Read and FB Write for v2.1:
+- xlnx,pixels-per-clock : Pixels per clock set in IP (1, 2 or 4)
+- clocks: Reference to the AXI Streaming clock feeding the AP_CLK
+- clock-names: Must have "ap_clk"
+
+Optional Properties Common to both FB Read and FB Write for v2.1:
+- xlnx,dma-align : DMA alignment required in bytes.
+ If absent then dma alignment is calculated as
+ pixels per clock * 8.
+ If present it should be power of 2 and at least
+ pixels per clock * 8.
+ Minimum is 8, 16, 32 when pixels-per-clock is
+ 1, 2 or 4.
+- xlnx,fid : Field ID enabled for interlaced video support.
+ Can be absent for progressive video.
+
+Optional properties:
+- xlnx,max-height : Maximum number of lines.
+- xlnx,max-width : Maximum number of pixels in a line.
+
+VIDEO FORMATS
+The following table describes the legal string values to be used for
+the xlnx,vid-formats property. To the left is the string value, and the
+two columns to the right describe how this is mapped to an equivalent V4L2
+and DRM fourcc code, respectively, by the driver.
+
+IP FORMAT DTS String V4L2 Fourcc DRM Fourcc
+-------------|----------------|----------------------|---------------------
+RGB8 bgr888 V4L2_PIX_FMT_RGB24 DRM_FORMAT_BGR888
+BGR8 rgb888 V4L2_PIX_FMT_BGR24 DRM_FORMAT_RGB888
+RGBX8 xbgr8888 V4L2_PIX_FMT_BGRX32 DRM_FORMAT_XBGR8888
+RGBA8 abgr8888 <not supported> DRM_FORMAT_ABGR8888
+BGRA8 argb8888 <not supported> DRM_FORMAT_ARGB8888
+BGRX8 xrgb8888 V4L2_PIX_FMT_XBGR32 DRM_FORMAT_XRGB8888
+RGBX10 xbgr2101010 V4L2_PIX_FMT_XBGR30 DRM_FORMAT_XBGR2101010
+RGBX12 xbgr2121212 V4L2_PIX_FMT_XBGR40 <not supported>
+RGBX16 rgb16 V4L2_PIX_FMT_BGR40 <not supported>
+YUV8 vuy888 V4L2_PIX_FMT_VUY24 DRM_FORMAT_VUY888
+YUVX8 xvuy8888 V4L2_PIX_FMT_XVUY32 DRM_FORMAT_XVUY8888
+YUYV8 yuyv V4L2_PIX_FMT_YUYV DRM_FORMAT_YUYV
+UYVY8 uyvy V4L2_PIX_FMT_UYVY DRM_FORMAT_UYVY
+YUVA8 avuy8888 <not supported> DRM_FORMAT_AVUY
+YUVX10 yuvx2101010 V4L2_PIX_FMT_XVUY10 DRM_FORMAT_XVUY2101010
+Y8 y8 V4L2_PIX_FMT_GREY DRM_FORMAT_Y8
+Y10 y10 V4L2_PIX_FMT_Y10 DRM_FORMAT_Y10
+Y_UV8 nv16 V4L2_PIX_FMT_NV16 DRM_FORMAT_NV16
+Y_UV8 nv16 V4L2_PIX_FMT_NV16M DRM_FORMAT_NV16
+Y_UV8_420 nv12 V4L2_PIX_FMT_NV12 DRM_FORMAT_NV12
+Y_UV8_420 nv12 V4L2_PIX_FMT_NV12M DRM_FORMAT_NV12
+Y_UV10 xv20 V4L2_PIX_FMT_XV20M DRM_FORMAT_XV20
+Y_UV10 xv20 V4L2_PIX_FMT_XV20 <not supported>
+Y_UV10_420 xv15 V4L2_PIX_FMT_XV15M DRM_FORMAT_XV15
+Y_UV10_420    xv15            V4L2_PIX_FMT_XV15      <not supported>
+
+Examples:
+
+FB Read Example:
+++++++++
+v_frmbuf_rd_0: v_frmbuf_rd@80000000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,axi-frmbuf-rd-v2.1";
+ interrupt-parent = <&gic>;
+ interrupts = <0 92 4>;
+ reset-gpios = <&gpio 80 1>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dma-addr-width = <32>;
+ xlnx,vid-formats = "bgr888","xbgr8888";
+ xlnx,pixels-per-clock = <1>;
+ xlnx,dma-align = <8>;
+ clocks = <&vid_stream_clk>;
+	clock-names = "ap_clk";
+};
+
+FB Write Example:
+++++++++
+v_frmbuf_wr_0: v_frmbuf_wr@80000000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,axi-frmbuf-wr-v2.1";
+ interrupt-parent = <&gic>;
+ interrupts = <0 92 4>;
+ reset-gpios = <&gpio 80 1>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dma-addr-width = <64>;
+ xlnx,vid-formats = "bgr888","yuyv","nv16","nv12";
+ xlnx,pixels-per-clock = <2>;
+ xlnx,dma-align = <16>;
+ clocks = <&vid_stream_clk>;
+	clock-names = "ap_clk";
+};
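+
+Client usage sketch:
+++++++++
+
+Both IP blocks are driven through the standard Linux dmaengine API, with
+the engine accepting interleaved transfers. The following hedged C sketch
+shows an FB Write ("capture") client under stated assumptions: the channel
+is wired to the client via a "dmas"/"dma-names" entry named "dma0", and the
+buffer uses a single-plane layout; these names and the layout are
+illustrative, not part of this binding.
+
+	#include <linux/dmaengine.h>
+	#include <linux/overflow.h>
+	#include <linux/slab.h>
+
+	/* Queue one video frame on an FB Write channel. */
+	static int frmbuf_capture_frame(struct device *dev, dma_addr_t buf,
+					size_t bytes_per_line, size_t lines,
+					size_t stride)
+	{
+		struct dma_async_tx_descriptor *desc;
+		struct dma_interleaved_template *xt;
+		struct dma_chan *chan;
+
+		chan = dma_request_chan(dev, "dma0");
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+
+		/* One chunk per line; icg skips the stride padding. */
+		xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
+		if (!xt) {
+			dma_release_channel(chan);
+			return -ENOMEM;
+		}
+		xt->dst_start = buf;	/* FB Write: memory is the destination */
+		xt->dir = DMA_DEV_TO_MEM;
+		xt->dst_inc = true;
+		xt->dst_sgl = true;
+		xt->numf = lines;
+		xt->frame_size = 1;
+		xt->sgl[0].size = bytes_per_line;
+		xt->sgl[0].icg = stride - bytes_per_line;
+
+		desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
+		kfree(xt);
+		if (!desc) {
+			dma_release_channel(chan);
+			return -EINVAL;
+		}
+
+		dmaengine_submit(desc);
+		dma_async_issue_pending(chan);
+		/* A real driver keeps the channel and releases it on teardown. */
+		return 0;
+	}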
diff --git a/Documentation/devicetree/bindings/drm/xilinx/cresample.txt b/Documentation/devicetree/bindings/drm/xilinx/cresample.txt
new file mode 100644
index 000000000000..177ab58bfc9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/cresample.txt
@@ -0,0 +1,22 @@
+Device-Tree bindings for Xilinx Chroma Resampler (CRESAMPLE)
+
+Xilinx CRESAMPLE provides the chroma resampling of YUV formats.
+
+Required properties:
+ - compatible: value should be "xlnx,v-cresample-3.01.a"
+ - reg: base address and size of the CRESAMPLE IP
+ - xlnx,input-format, xlnx,output-format: the input/output video formats of
+   CRESAMPLE. The value should be one of the following format strings.
+
+ yuv422
+ yuv444
+ yuv420
+
+Example:
+
+ v_cresample_0: v-cresample@40020000 {
+ compatible = "xlnx,v-cresample-3.01.a";
+ reg = <0x40020000 0x10000>;
+ xlnx,input-format = "yuv444";
+ xlnx,output-format = "yuv422";
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/dp.txt b/Documentation/devicetree/bindings/drm/xilinx/dp.txt
new file mode 100644
index 000000000000..09485a46d78b
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/dp.txt
@@ -0,0 +1,65 @@
+Device-Tree bindings for Xilinx DisplayPort IP core
+
+The IP core supports transmission of video data in DisplayPort protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,v-dp".
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - clocks: phandles for the AXI clock and the audio clock
+ - clock-names: The identification string, "aclk", is always required for
+   the AXI clock. "aud_clk" is required only when audio needs to be enabled.
+
+ - xlnx,dp-version: Version of DisplayPort protocol.
+ - xlnx,max-lanes: Maximum number of lanes of the IP core. The value should
+ be one of 1, 2, or 4.
+ - xlnx,max-link-rate: Maximum link rate of the IP core. The value should be
+ one of 162000, 270000, or 540000.
+ - xlnx,max-bpc: Maximum bits-per-color. The value should be one of 8, 10, 12,
+ or 16.
+ - xlnx,axi-clock: Clock rate of axi4-lite. This is required to provide
+ the correct clock divider for AUX.
+
+ - xlnx,colormetry: Color format. The value should be one of "rgb", "ycrcb422",
+ "ycrcb444", or "yonly". These are based on the DisplayPort specification.
+ - xlnx,bpc: bits-per-color value to be configured. The value should be one of
+ 6, 8, 10, 12, or 16.
+
+Optional properties:
+ - xlnx,enable-yonly: Enable yonly colormetry.
+ - xlnx,enable-ycrcb: Enable ycrcb colormetry.
+ - xlnx,enable-sync: Enable synchronous operation with video clock.
+ - xlnx,max-pclock-frequency: Maximum pixel clock rate (kHz). The value should
+   specify the maximum pixel clock rate in kHz that the IP core supports.
+ - xlnx,dp-sub: A phandle referencing the ZynqMP DisplayPort subsystem
+ which contains additional blocks such as buffer managers, blender, and audio.
+ - phy-names: Names for each PHY lane. The name should be 'dp-phy' followed by
+   the lane number, for example 'dp-phy0', 'dp-phy1'. 'dp-phy0' should be the
+   primary lane and is used for the PLL lock.
+ - phys: The phy phandles for each lane. This should follow the phy-zynqmp
+ definition in Documentation/devicetree/bindings/phy/phy-zynqmp.txt
+
+Example:
+
+ xlnx_dp: dp@83c10000 {
+ compatible = "xlnx,v-dp";
+ reg = <0x83c10000 0x10000>;
+ interrupts = <0 57 4>;
+ interrupt-parent = <&ps7_scugic_0>;
+ clocks = <&dp_aclk>, <&dp_aud_clk>;
+ clock-names = "aclk", "aud_clk";
+
+ xlnx,dp-version = "v1.2";
+ xlnx,max-lanes = <4>;
+ xlnx,max-link-rate = <270000>;
+ xlnx,max-bpc = <16>;
+ xlnx,max-pclock-frequency = <150000>;
+ xlnx,enable-ycrcb;
+
+ xlnx,colormetry = "rgb";
+ xlnx,bpc = <8>;
+
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt b/Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt
new file mode 100644
index 000000000000..678235355caa
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/dp_sub.txt
@@ -0,0 +1,65 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Subsystem
+
+The ZynqMP DisplayPort subsystem handles DMA channel buffer management,
+blending, and audio mixing. The DisplayPort subsystem receives display
+and audio frames from DPDMA and transmits output to the DisplayPort IP core.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-sub".
+ - reg: Base address and size of the IP core.
+ - reg-names: "blend", "av_buf", and "aud" for Blender, AV Buffer manager, and
+ Audio modules respectively.
+ - xlnx,output-fmt: Output color format. The value should be one of "rgb",
+ "ycrcb422", "ycrcb444", or "yonly". These are based on the DisplayPort
+   specification. The value shall match the DP colormetry value.
+
+Optional properties:
+ - xlnx,vid-fmt: Video input color format. The value should be one of
+ "vyuy"
+ "uyvy"
+ "yuyv"
+ "yvyu"
+ "yuv422"
+ "yvu422"
+ "yuv444"
+ "yvu444"
+ "nv16"
+	"nv61"
+ "bgr888"
+ "rgb888"
+ "xbgr8888"
+ "xrgb8888"
+ "xbgr2101010"
+ "xrgb2101010"
+ "yuv420"
+ "yvu420"
+ "nv12"
+ "nv21".
+ If nothing is specified, "vyuy" will be selected.
+ - xlnx,gfx-fmt: Graphics input color format. The value should be one of
+ "abgr8888"
+ "argb8888"
+ "rgba8888"
+ "bgra8888"
+ "bgr888"
+ "rgb888"
+ "rgba5551"
+ "bgra5551"
+ "rgba4444"
+ "bgra4444"
+ "rgb565"
+ "bgr565".
+ If nothing is specified, "abgr8888" will be selected.
+
+ - xlnx,vid-clk-pl: Should be used when the pixel clock comes from the PL.
+
+Example:
+
+ xlnx_dp_sub: dp_sub@43c0a000 {
+ compatible = "xlnx,dp-sub";
+ reg = <0x43c0a000 0x1000>, <0x43c0b000 0x1000>,
+ <0x43c0c000 0x1000>;
+ reg-names = "blend", "av_buf", "aud";
+ xlnx,output-fmt = "rgb";
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/dsi.txt b/Documentation/devicetree/bindings/drm/xilinx/dsi.txt
new file mode 100644
index 000000000000..f56db6a0d95b
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/dsi.txt
@@ -0,0 +1,61 @@
+Device-Tree bindings for Xilinx MIPI DSI Tx IP core
+
+The IP core supports transmission of video data in MIPI DSI protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,mipi-dsi-tx-subsystem".
+ - reg: Base address and size of the IP core.
+ - xlnx,dsi-data-type: Color format. The value should be one of "MIPI_DSI_FMT_RGB888",
+ "MIPI_DSI_FMT_RGB666", "MIPI_DSI_FMT_RGB666_PACKED" or "MIPI_DSI_FMT_RGB565".
+ - simple_panel: The subnode for the connected panel. This represents the
+   DSI peripheral connected to the DSI host node. Please refer to
+   Documentation/devicetree/bindings/display/mipi-dsi-bus.txt. The
+   simple-panel driver has the auo,b101uan01 panel timing parameters added
+   along with other existing panels. The DSI driver derives the required
+   Tx IP controller timing values from the panel timing parameters. Refer to
+   xilinx_dsi_mode_set() in the DSI driver for how to derive the DSI
+   Tx controller timing parameters.
+ - ports: Connects to the drm device node through device graph binding.
+ The port should contain a 'remote-endpoint' subnode that points to the
+ endpoint in the port of the drm device node. Refer to
+ Documentation/devicetree/bindings/graph.txt.
+ - xlnx,dsi-num-lanes: Number of DSI lanes for the Tx controller.
+   The value should be 1, 2, 3 or 4. Based on xlnx,dsi-num-lanes and the
+   line rate of the MIPI D-PHY core in Mbps, the AXI4-Stream data received
+   by the Xilinx MIPI DSI Tx IP core is framed with markers as per the DSI
+   protocol, and the packet thus framed is converted to serial data by the
+   MIPI D-PHY core. Please refer to Xilinx PG238 for more details. This
+   value should be equal to the number of lanes supported by the connected
+   DSI panel; the panel has to support this value or has to be programmed
+   to the same value that the DSI Tx controller is configured to.
+
+Required simple_panel properties:
+ - compatible: Value should be one of the panel names mentioned in the
+   of_match_table of the simple-panel driver,
+   drivers/gpu/drm/panel/panel-simple.c, e.g. "auo,b101uan01".
+
+Example:
+
+#include <dt-bindings/drm/mipi-dsi.h>
+ mipi_dsi_tx_subsystem_0: mipi_dsi_tx_subsystem@80000000 {
+ compatible = "xlnx,mipi-dsi-tx-subsystem";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dsi-num-lanes = <4>;
+ xlnx,dsi-data-type = <MIPI_DSI_FMT_RGB888>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ mipi_port: endpoint {
+ remote-endpoint = <&drm_port>;
+ };
+ };
+ };
+ simple_panel: simple-panel@0 {
+ compatible = "auo,b101uan01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/osd.txt b/Documentation/devicetree/bindings/drm/xilinx/osd.txt
new file mode 100644
index 000000000000..9f30706af15d
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/osd.txt
@@ -0,0 +1,19 @@
+Device-Tree bindings for Xilinx Video On Screen Display (OSD)
+
+Xilinx OSD provides multi-plane support. Some properties can be configured
+at IP synthesis time.
+
+Required properties:
+ - compatible: value should be "xlnx,v-osd-5.01.a"
+ - reg: base address and size of the OSD IP
+ - xlnx,num-layers: the number of layers (up to 8) supported by the OSD
+ - xlnx,screen-width: the maximum screen width in pixels (up to 4096)
+   supported by the OSD
+
+Example:
+
+ v_osd_0: v-osd@40040000 {
+ compatible = "xlnx,v-osd-5.01.a";
+ reg = <0x40040000 0x10000>;
+ xlnx,num-layers = <2>;
+ xlnx,screen-width = <1920>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt b/Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt
new file mode 100644
index 000000000000..6d801d0426d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/rgb2ycrcb.txt
@@ -0,0 +1,14 @@
+Device-Tree bindings for Xilinx RGB to YCrCb converter (RGB2YCRCB)
+
+Xilinx RGB2YCRCB converts the pixel format from RGB to YCrCb.
+
+Required properties:
+ - compatible: value should be "xlnx,v-rgb2ycrcb-6.01.a"
+ - reg: base address and size of the RGB2YCRCB IP
+
+Example:
+
+ v_rgb2ycrcb_0: v-rgb2ycrcb@40030000 {
+ compatible = "xlnx,v-rgb2ycrcb-6.01.a";
+ reg = <0x40030000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/sdi.txt b/Documentation/devicetree/bindings/drm/xilinx/sdi.txt
new file mode 100644
index 000000000000..ceb5340364bb
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/sdi.txt
@@ -0,0 +1,34 @@
+Device-Tree bindings for Xilinx SDI Tx IP core
+
+The IP core supports transmission of video data using the SDI Tx protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,v-smpte-uhdsdi-tx-ss".
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reg: Base address and size of the IP core.
+ - ports: Connects to the drm device node through device graph binding.
+ The port should contain a 'remote-endpoint' subnode that points to the
+ endpoint in the port of the drm device node. Refer to
+ Documentation/devicetree/bindings/graph.txt.
+ - xlnx,vtc: vtc phandle
+
+Example:
+
+ v_smpte_uhdsdi_tx_ss: v_smpte_uhdsdi_tx_ss@80020000 {
+ compatible = "xlnx,v-smpte-uhdsdi-tx-ss";
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reg = <0x0 0x80020000 0x0 0x10000>;
+ xlnx,vtc = <&v_tc_0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ sdi_port: endpoint {
+ remote-endpoint = <&drm_port>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/vtc.txt b/Documentation/devicetree/bindings/drm/xilinx/vtc.txt
new file mode 100644
index 000000000000..13309048e789
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/vtc.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx Video Timing Controller (VTC)
+
+Xilinx VTC provides the timings for Video IPs.
+
+Required properties:
+ - compatible: value should be "xlnx,v-tc-5.01.a"
+ - reg: base address and size of the VTC IP
+ - interrupts: the interrupt number
+ - interrupt-parent: the phandle for the interrupt controller
+
+Example:
+
+ v_tc_0: v-tc@40010000 {
+ compatible = "xlnx,v-tc-5.01.a";
+ interrupt-parent = <&intc>;
+ interrupts = <0 54 4>;
+ reg = <0x40010000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt b/Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt
new file mode 100644
index 000000000000..25859d47ba85
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/xilinx/xilinx_drm.txt
@@ -0,0 +1,162 @@
+Device-Tree bindings for Xilinx DRM
+
+Xilinx DRM supports display pipelines built with Xilinx soft IPs on the
+FPGA and with IPs on Xilinx boards.
+
+The example hardware pipeline is depicted below
+(*IPs in parentheses() are optional. IPs in brackets[] don't require drivers).
+vdma-[remap]-(rgb2yuv)-(cresample)-(osd)-(rgb2yuv)-(cresample)-[axi2vid]-adv7511
+(vdma-[remap]-(rgb2yuv)-(cresample)-|) |
+ si570 -> vtc
+
+Required properties:
+ - compatible: value should be "xlnx,drm".
+ - xlnx,osd: the phandle for on screen display IP if used in the hardware design
+ - xlnx,rgb2yuv: the phandle for rgb2ycrcb IP if used in the hardware design
+ - xlnx,cresample: the phandle for chroma resampler IP if used in the hardware
+ design
+ - xlnx,vtc: the phandle for video timing controller IP
+ - xlnx,encoder-slave: the phandle for the encoder slave.
+ - clocks: the phandle for the pixel clock
+ - planes: the subnode for resources for each plane
+ - xlnx,connector-type: the type of connector. The value should be one of
+ "HDMIA" or "DisplayPort" depending on which connector type to be used.
+
+Optional properties:
+ - xlnx,dp-sub: the phandle to DisplayPort subsystem node for ZynqMP.
+ - xlnx,sdi: the phandle to SDI node if the pipeline has the SDI IP core.
+ - ports: device graph binding can be used to define connectivity. The DT
+ bindings are defined in Documentation/devicetree/bindings/graph.txt.
+
+Required planes properties:
+ - xlnx,pixel-format: the format of the plane manager. The value should be
+   one of the following format strings.
+
+ "yuv420"
+ "uvy422"
+ "vuy422"
+ "yuv422"
+	"yvu422"
+ "yuv444"
+ "nv12"
+ "nv21"
+ "nv16"
+ "nv61"
+ "abgr1555"
+ "argb1555"
+ "rgba4444"
+ "bgra4444"
+ "bgr565"
+ "rgb565"
+ "bgr888"
+ "rgb888"
+ "xbgr8888"
+ "xrgb8888"
+ "abgr8888"
+ "argb8888"
+ "bgra8888"
+ "rgba8888"
+
+Required plane properties:
+ - dmas: the phandle list of DMA specifiers
+ - dma-names: the identifier strings for DMAs.
+ - xlnx,rgb2yuv: the phandle for rgb2ycrcb IP if used for plane
+ - xlnx,cresample: the phandle for chroma resampler IP if used for plane
+
+The pipeline can be configured as in the following examples, among others.
+ - Example 1:
+vdma - [remap] - rgb2yuv - cresample - [axi2vid] - adv7511
+ |
+ si570 - vtc
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,vtc = <&v_tc_0>;
+ xlnx,encoder-slave = <&adv7511>;
+ xlnx,connector-type = "HDMIA";
+ clocks = <&si570>;
+ planes {
+ xlnx,pixel-format = "yuv422";
+ plane0 {
+			dmas = <&axi_vdma_0>;
+ dma-names = "axi_vdma_0";
+ xlnx,rgb2yuv = <&v_rgb2ycrcb_0>;
+ xlnx,cresample = <&v_cresample_0>;
+ };
+ };
+ };
+
+ - Example 2:
+vdma - [remap] --------- osd - cresample - [axi2vid] - adv7511
+vdma - [remap] - rgb2yuv -| |
+ si570 - vtc
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,osd = <&v_osd_0>;
+ xlnx,cresample = <&v_cresample_0>;
+ xlnx,vtc = <&v_tc_0>;
+ xlnx,encoder-slave = <&adv7511>;
+ xlnx,connector-type = "DisplayPort";
+ clocks = <&si570>;
+ planes {
+ xlnx,pixel-format = "yuv422";
+ plane0 {
+			dmas = <&axi_vdma_0>;
+ dma-names = "axi_vdma_0";
+ };
+ plane1 {
+			dmas = <&axi_vdma_1>;
+ dma-names = "axi_vdma_1";
+ xlnx,rgb2yuv = <&v_rgb2ycrcb_0>;
+ };
+ };
+ };
+
+ - Example 3:
+dpdma - ZynqMP DP subsystem - DP
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,encoder-slave = <&xlnx_dp>;
+ clocks = <&si570 0>;
+ xlnx,connector-type = "DisplayPort";
+ xlnx,dp-sub = <&xlnx_dp_sub>;
+ planes {
+ xlnx,pixel-format = "rgb565";
+ plane0 {
+ dmas = <&xlnx_dpdma 3>;
+ dma-names = "xlnx_dpdma";
+ };
+ plane1 {
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ dma-names = "xlnx_dpdma_0",
+ "xlnx_dpdma_1",
+ "xlnx_dpdma_2";
+ };
+ };
+ };
+
+- Example 4:
+vdma - Xilinx MIPI DSI
+ xilinx_drm: xilinx_drm {
+ compatible = "xlnx,drm";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ drm_port: endpoint {
+ remote-endpoint = <&mipi_port>;
+ };
+ };
+ };
+ planes {
+ xlnx,pixel-format = "rgb888";
+ plane0 {
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "axi_vdma_0";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt b/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt
new file mode 100644
index 000000000000..bb9e30af4afc
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt
@@ -0,0 +1,13 @@
+Binding for the Zynq OpenCL (ZOCL) DRM driver
+
+Required properties:
+- compatible: should contain "xlnx,zocl"
+- reg: base address and size of the memory-mapped control port for the
+  OpenCL kernel
+
+Example:
+
+ zocl_drm {
+ compatible = "xlnx,zocl";
+ status = "okay";
+ reg = <0x80000000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt b/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt
new file mode 100644
index 000000000000..552f0c7774b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt
@@ -0,0 +1,15 @@
+* ARM Cortex A57 and A53 L1/L2 cache error reporting
+
+The CPU Memory Error Syndrome and L2 Memory Error Syndrome registers can be
+used to check for L1 and L2 memory errors.
+
+The following section describes the Cortex A57/A53 EDAC DT node binding.
+
+Required properties:
+- compatible: Should be "arm,cortex-a57-edac" or "arm,cortex-a53-edac"
+
+Example:
+ edac {
+ compatible = "arm,cortex-a57-edac";
+ };
+
diff --git a/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt b/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt
new file mode 100644
index 000000000000..94fbb8da2d1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt
@@ -0,0 +1,19 @@
+The PL310 L2 cache EDAC driver reports the data and tag RAM parity errors.
+
+Required properties:
+- compatible: Should be "arm,pl310-cache".
+- interrupts: Interrupt number to the CPU.
+- reg: Physical base address and size of cache controller's memory mapped
+ registers
+
+Example:
+++++++++
+
+ L2: cache-controller {
+ compatible = "arm,pl310-cache";
+ interrupts = <0 2 4>;
+ reg = <0xf8f02000 0x1000>;
+ };
+
+The PL310 L2 cache EDAC driver detects the parity enable state by reading
+the appropriate control register.
diff --git a/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt b/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt
new file mode 100644
index 000000000000..252bb96bee90
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt
@@ -0,0 +1,18 @@
+The Xilinx ZynqMP OCM EDAC driver reports the OCM ECC single-bit errors that
+are corrected and the double-bit ECC errors that are detected by the OCM
+ECC controller.
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-ocmc-1.0".
+- reg: Should contain OCM controller registers location and length.
+- interrupt-parent: Should be core interrupt controller.
+- interrupts: Property with a value describing the interrupt number.
+
+Example:
+++++++++
+ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 10 4>;
+};
diff --git a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
index a4fe136be2ba..56ec638f3976 100644
--- a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
+++ b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
@@ -11,7 +11,9 @@ power management service, FPGA service and other platform management
services.
Required properties:
- - compatible: Must contain: "xlnx,zynqmp-firmware"
+ - compatible: Must contain one of the following:
+ "xlnx,zynqmp-firmware" for Zynq Ultrascale+ MPSoC
+ "xlnx,versal-firmware-wip" for Versal
- method: The method of calling the PM-API firmware layer.
Permitted values are:
- "smc" : SMC #0, following the SMCCC
@@ -21,6 +23,8 @@ Required properties:
Example
-------
+Zynq Ultrascale+ MPSoC
+----------------------
firmware {
zynqmp_firmware: zynqmp-firmware {
compatible = "xlnx,zynqmp-firmware";
@@ -28,3 +32,13 @@ firmware {
...
};
};
+
+Versal
+------
+firmware {
+ versal_firmware: versal-firmware {
+ compatible = "xlnx,versal-firmware-wip";
+ method = "smc";
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
index 90c44694a30b..f64d815ec75e 100644
--- a/Documentation/devicetree/bindings/fpga/fpga-region.txt
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -196,6 +196,7 @@ Optional properties:
- config-complete-timeout-us : The maximum time in microseconds time for the
FPGA to go to operating mode after the region has been programmed.
- child nodes : devices in the FPGA after programming.
+- resets: Phandle and reset specifier for this region.
In the example below, when an overlay is applied targeting fpga-region0,
fpga_mgr is used to program the FPGA. Two bridges are controlled during
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt
new file mode 100644
index 000000000000..85f8970010b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt
@@ -0,0 +1,61 @@
+Xilinx ZynqMP AFI Interface Manager
+
+The Zynq UltraScale+ MPSoC Processing System core provides access from PL
+masters to PS internal peripherals and memory through AXI FIFO interface
+(AFI) interfaces.
+
+Required properties:
+- compatible: Should contain "xlnx,afi-fpga"
+- config-afi: Pairs of <regid value>
+
+The possible regids and values are:
+ regid: Regid of the register to be written; possible values:
+ 0- AFIFM0_RDCTRL
+ 1- AFIFM0_WRCTRL
+ 2- AFIFM1_RDCTRL
+ 3- AFIFM1_WRCTRL
+ 4- AFIFM2_RDCTRL
+ 5- AFIFM2_WRCTRL
+ 6- AFIFM3_RDCTRL
+ 7- AFIFM3_WRCTRL
+ 8- AFIFM4_RDCTRL
+ 9- AFIFM4_WRCTRL
+ 10- AFIFM5_RDCTRL
+ 11- AFIFM5_WRCTRL
+ 12- AFIFM6_RDCTRL
+ 13- AFIFM6_WRCTRL
+ 14- AFIFS
+ 15- AFIFS_SS2
+- value: Value to be written to the register selected by the regid.
+  for FM0_RDCTRL(0) through FM6_WRCTRL(13) the value selects the fabric
+  width: 2: 32-bit, 1: 64-bit, 0: 128-bit enabled
+  for AFIFS(14) the value selects the AXI data width:
+	dw_ss1_sel bits (11:10)
+	dw_ss0_sel bits (9:8)
+	 0x0: 32-bit AXI data width,
+	 0x1: 64-bit AXI data width,
+	 0x2: 128-bit AXI data width
+	All other bits are write ignored.
+
+  for AFIFS_SS2(15) the value selects the SS2 AXI data width:
+	0x000: 32-bit AXI data width,
+	0x100: 64-bit AXI data width,
+	0x200: 128-bit AXI data width
+
+Example:
+afi0: afi0 {
+ compatible = "xlnx,afi-fpga";
+ config-afi = <0 2>, <1 1>, <2 1>;
+};
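+
+A hypothetical configuration that also programs the AFIFS data width
+selectors is sketched below; the regid/value pairs are illustrative only.
+Here <14 0x200> sets dw_ss0_sel (bits 9:8) to 128-bit and <15 0x100>
+selects a 64-bit SS2 AXI data width:
+
+afi1: afi1 {
+	compatible = "xlnx,afi-fpga";
+	config-afi = <0 2>, <1 2>, <14 0x200>, <15 0x100>;
+};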
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt
new file mode 100644
index 000000000000..acca970cd341
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt
@@ -0,0 +1,10 @@
+Device tree bindings for the Versal FPGA manager on the Versal SoC,
+controlled through the Versal SoC firmware interface.
+
+Required properties:
+- compatible: should contain "xlnx,versal-fpga"
+
+Example:
+ versal_fpga: fpga {
+ compatible = "xlnx,versal-fpga";
+ };
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt
new file mode 100644
index 000000000000..e00942cf3091
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt
@@ -0,0 +1,19 @@
+Xilinx Zynq AFI Interface Manager
+
+The Zynq Processing System core provides access from PL masters to PS
+internal peripherals and memory through AXI FIFO interface (AFI) ports.
+
+Required properties:
+- compatible: Should contain "xlnx,zynq-afi-fpga"
+- reg: Physical base address and size of the controller's register area.
+- xlnx,afi-buswidth : AFI bus width selector.
+	0: 64-bit AXI data width
+	1: 32-bit AXI data width
+
+Example:
+afi0: afi0 {
+ compatible = "xlnx,zynq-afi-fpga";
+ reg = <0xf8008000 0x1000>;
+ xlnx,afi-buswidth = <1>;
+};
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
index 3052bf619dd5..105943a48610 100644
--- a/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
+++ b/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
@@ -4,11 +4,15 @@ Programmable Logic (PL). The configuration uses the firmware interface.
Required properties:
- compatible: should contain "xlnx,zynqmp-pcap-fpga"
+- clocks: phandle to the clock required for operation
+- clock-names: name for the clock, should be "ref_clk"
Example for full FPGA configuration:
fpga-region0 {
compatible = "fpga-region";
+ clocks = <&clkc 41>;
+ clock-names = "ref_clk";
fpga-mgr = <&zynqmp_pcap>;
#address-cells = <0x1>;
#size-cells = <0x1>;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
index 08eed2335db0..516f4f50b124 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
@@ -8,11 +8,17 @@ local interrupts can be enabled on channel basis.
Required properties:
- compatible : Should be "xlnx,xps-gpio-1.00.a"
- reg : Address and length of the register set for the device
-- #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
+- #gpio-cells : Should be two or three. The first cell is the pin number.
+	The second cell is used to specify the channel offset:
+		0 - first channel
+		8 - second channel
+	The third cell is optional and used to specify flags. Use the macros
+	defined in include/dt-bindings/gpio/gpio.h
- gpio-controller : Marks the device node as a GPIO controller.
Optional properties:
+- clock-names : Should be "s_axi_aclk"
+- clocks: Input clock specifier. Refer to common clock bindings.
- interrupts : Interrupt mapping for GPIO IRQ.
- xlnx,all-inputs : if n-th bit is setup, GPIO-n is input
- xlnx,dout-default : if n-th bit is 1, GPIO-n default value is 1
@@ -23,6 +29,7 @@ Optional properties:
- xlnx,dout-default-2 : as above but the second channel
- xlnx,gpio2-width : as above but for the second channel
- xlnx,tri-default-2 : as above but for the second channel
+- xlnx,no-init : No initialisation at probe
Example:
@@ -30,6 +37,8 @@ gpio: gpio@40000000 {
#gpio-cells = <2>;
compatible = "xlnx,xps-gpio-1.00.a";
gpio-controller ;
+ clock-names = "s_axi_aclk";
+ clocks = <&clkc 71>;
interrupt-parent = <&microblaze_0_intc>;
interrupts = < 6 2 >;
reg = < 0x40000000 0x10000 >;
@@ -44,3 +53,11 @@ gpio: gpio@40000000 {
xlnx,tri-default = <0xffffffff>;
xlnx,tri-default-2 = <0xffffffff>;
} ;
+
+Example demonstrating how the reset-gpios property is used by a consumer
+node (the controller must be configured with #gpio-cells = <3> for this):
+
+driver: driver@80000000 {
+ compatible = "xlnx,driver";
+ reset-gpios = <&gpio 0 0 GPIO_ACTIVE_LOW>; /* gpio phandle, gpio pin-number, channel offset, flag state */
+ reg = <0x0 0x80000000 0x0 0x10000>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
index 4fa4eb5507cd..4806d846c0fc 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
@@ -6,7 +6,8 @@ Required properties:
- First cell is the GPIO line number
- Second cell is used to specify optional
parameters (unused)
-- compatible : Should be "xlnx,zynq-gpio-1.0" or "xlnx,zynqmp-gpio-1.0"
+- compatible : Should be "xlnx,zynq-gpio-1.0" or
+		"xlnx,zynqmp-gpio-1.0" or "xlnx,versal-gpio-1.0"
- clocks : Clock specifier (see clock bindings for details)
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt specifier (see interrupt bindings for
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt
new file mode 100644
index 000000000000..3d1e77014865
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt
@@ -0,0 +1,159 @@
+Xilinx AMS device driver
+
+The AMS includes an ADC as well as on-chip sensors that can be used to
+sample external voltages and monitor on-die operating conditions, such as
+temperature and supply voltage levels. The AMS has two SYSMON blocks.
+The PL-SYSMON block is capable of monitoring off-chip voltage and temperature.
+It has DRP, JTAG and I2C interfaces to enable monitoring from an external
+master; of these, currently only DRP is supported. The other block, PS-SYSMON,
+is memory mapped to the PS. Both blocks have built-in alarm generation logic
+that is used to interrupt the processor based on a configured condition.
+
+All designs should have the AMS register space; the PS and PL register spaces
+are optional depending on the design. The driver works with PS-only, PL-only
+and combined PS and PL configurations. Specify registers according to your
+design: the DTS file should always have the AMS module node, while the PS
+and PL subnodes are optional.
+
+Required properties:
+ - compatible: Should be "xlnx,zynqmp-ams"
+ - reg: Should specify AMS register space
+ - interrupts: Interrupt number for the AMS control interface
+ - interrupt-names: Interrupt name, must be "ams-irq"
+ - clocks: Should contain a clock specifier for the device
+ - ranges: keep the property empty to map child address space
+ (for PS and/or PL) nodes 1:1 onto the parent address
+ space
+
+AMS device tree subnode:
+ - compatible: Should be "xlnx,zynqmp-ams-ps" or "xlnx,zynqmp-ams-pl"
+ - reg: Register space for PS or PL
+
+Optional properties:
+
+Following optional property only valid for PL.
+ - xlnx,ext-channels: List of external channels that are connected to the
+ AMS PL module.
+
+ The child nodes of this node represent the external channels which are
+ connected to the AMS Module. If the property is not present
+ no external channels will be assumed to be connected.
+
+ Each child node represents one channel and has the following
+ properties:
+ Required properties:
+ * reg: Pair of pins the channel is connected to.
+ 0: VP/VN
+ 1: VUSER0
+ 2: VUSER1
+			3: VUSER2
+			4: VUSER3
+ 5: VAUXP[0]/VAUXN[0]
+ 6: VAUXP[1]/VAUXN[1]
+ ...
+ 20: VAUXP[15]/VAUXN[15]
+ Note each channel number should only be used at most
+ once.
+ Optional properties:
+ * xlnx,bipolar: If set the channel is used in bipolar
+ mode.
+
+
+Example:
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ interrupt-names = "ams-irq";
+ clocks = <&clkc 70>;
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ reg-names = "ams-base";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ams_ps: ams_ps@ffa50800 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ reg = <0x0 0xffa50800 0x0 0x400>;
+ };
+
+ ams_pl: ams_pl@ffa50c00 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ reg = <0x0 0xffa50c00 0x0 0x400>;
+ xlnx,ext-channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ channel@1 {
+ reg = <1>;
+ };
+ channel@8 {
+ reg = <8>;
+ xlnx,bipolar;
+ };
+ };
+ };
+ };
+
+AMS Channels Details:
+
+Sysmon Block |Channel| Details |Measurement
+ Number Type
+---------------------------------------------------------------------------------------------------------
+AMS CTRL |0 |System PLLs voltage measurement, VCC_PSPLL. |Voltage
+ |1 |Battery voltage measurement, VCC_PSBATT. |Voltage
+ |2 |PL Internal voltage measurement, VCCINT. |Voltage
+ |3 |Block RAM voltage measurement, VCCBRAM. |Voltage
+ |4 |PL Aux voltage measurement, VCCAUX. |Voltage
+ |5 |Voltage measurement for six DDR I/O PLLs, VCC_PSDDR_PLL. |Voltage
+ |6 |VCC_PSINTFP_DDR voltage measurement. |Voltage
+---------------------------------------------------------------------------------------------------------
+PS Sysmon |7 |LPD temperature measurement. |Temperature
+          |8      |FPD Temperature Measurement (REMOTE).                     |Temperature
+ |9 |VCC PS LPD voltage measurement (supply1). |Voltage
+ |10 |VCC PS FPD voltage measurement (supply2). |Voltage
+ |11 |PS Aux voltage reference (supply3). |Voltage
+ |12 |DDR I/O VCC voltage measurement. |Voltage
+ |13 |PS IO Bank 503 voltage measurement (supply5). |Voltage
+ |14 |PS IO Bank 500 voltage measurement (supply6). |Voltage
+ |15 |VCCO_PSIO1 voltage measurement. |Voltage
+ |16 |VCCO_PSIO2 voltage measurement. |Voltage
+ |17 |VCC_PS_GTR voltage measurement (VPS_MGTRAVCC). |Voltage
+ |18 |VTT_PS_GTR voltage measurement (VPS_MGTRAVTT). |Voltage
+ |19 |VCC_PSADC voltage measurement. |Voltage
+---------------------------------------------------------------------------------------------------------
+PL Sysmon |20 |PL Temperature measurement. |Temperature
+          |21     |PL Internal Voltage measurement, VCCINT.                  |Voltage
+ |22 |PL Auxiliary Voltage measurement, VCCAUX. |Voltage
+ |23 |ADC Reference P+ Voltage measurement. |Voltage
+ |24 |ADC Reference N- Voltage measurement. |Voltage
+ |25 |PL Block RAM Voltage measurement, VCCBRAM. |Voltage
+ |26 |LPD Internal Voltage measurement, VCC_PSINTLP (supply4). |Voltage
+ |27 |FPD Internal Voltage measurement, VCC_PSINTFP (supply5). |Voltage
+ |28 |PS Auxiliary Voltage measurement (supply6). |Voltage
+ |29 |PL VCCADC Voltage measurement (vccams). |Voltage
+          |30     |Differential analog input signal Voltage measurement.     |Voltage
+ |31 |VUser0 Voltage measurement (supply7). |Voltage
+ |32 |VUser1 Voltage measurement (supply8). |Voltage
+ |33 |VUser2 Voltage measurement (supply9). |Voltage
+ |34 |VUser3 Voltage measurement (supply10). |Voltage
+ |35 |Auxiliary ch 0 Voltage measurement (VAux0). |Voltage
+ |36 |Auxiliary ch 1 Voltage measurement (VAux1). |Voltage
+ |37 |Auxiliary ch 2 Voltage measurement (VAux2). |Voltage
+ |38 |Auxiliary ch 3 Voltage measurement (VAux3). |Voltage
+ |39 |Auxiliary ch 4 Voltage measurement (VAux4). |Voltage
+ |40 |Auxiliary ch 5 Voltage measurement (VAux5). |Voltage
+ |41 |Auxiliary ch 6 Voltage measurement (VAux6). |Voltage
+ |42 |Auxiliary ch 7 Voltage measurement (VAux7). |Voltage
+ |43 |Auxiliary ch 8 Voltage measurement (VAux8). |Voltage
+ |44 |Auxiliary ch 9 Voltage measurement (VAux9). |Voltage
+ |45 |Auxiliary ch 10 Voltage measurement (VAux10). |Voltage
+ |46 |Auxiliary ch 11 Voltage measurement (VAux11). |Voltage
+ |47 |Auxiliary ch 12 Voltage measurement (VAux12). |Voltage
+ |48 |Auxiliary ch 13 Voltage measurement (VAux13). |Voltage
+ |49 |Auxiliary ch 14 Voltage measurement (VAux14). |Voltage
+ |50 |Auxiliary ch 15 Voltage measurement (VAux15). |Voltage
+---------------------------------------------------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index e0e0755cabd8..fecb1afdd8c1 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -15,6 +15,8 @@ Required properties:
configuration interface to interface to the XADC hardmacro.
* "xlnx,axi-xadc-1.00.a": When using the axi-xadc pcore to
interface to the XADC hardmacro.
+ * "xlnx,axi-sysmon-1.3": When using the axi-sysmon pcore to
+ interface to the sysmon hardmacro.
- reg: Address and length of the register set for the device
- interrupts: Interrupt for the XADC control interface.
- clocks: When using the ZYNQ this must be the ZYNQ PCAP clock,
@@ -110,3 +112,20 @@ Examples:
};
};
};
+
+ xadc@44a00000 {
+ compatible = "xlnx,axi-sysmon-1.3";
+ interrupt-parent = <&axi_intc_0>;
+ interrupts = <2 2>;
+ clocks = <&clk_bus_0>;
+ reg = <0x44a00000 0x10000>;
+
+ xlnx,channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt
new file mode 100644
index 000000000000..03b39f4b1625
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt
@@ -0,0 +1,56 @@
+Xilinx Interrupt Controller
+
+The controller is a soft IP core that is configured at build time for the
+number of interrupts and the type of each interrupt. These details cannot
+be changed at run time.
+
+Required properties:
+
+- compatible : should be "xlnx,xps-intc-1.00.a"
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value shall be a minimum of 1.
+ The Xilinx device trees typically use 2 but the 2nd value
+ is not used.
+- xlnx,kind-of-intr : A 32 bit value specifying the interrupt type for each
+ possible interrupt (1 = edge, 0 = level). The interrupt
+ type typically comes in thru the device tree node of
+ the interrupt generating device, but in this case
+ the interrupt type is determined by the interrupt
+ controller based on how it was implemented.
+- xlnx,num-intr-inputs: Specifies the number of interrupts supported
+ by the specific implementation of the controller (1-32).
+
+Optional properties:
+- interrupt-parent : Specifies an interrupt controller from which it is
+ chained (cascaded).
+- interrupts : Specifies the interrupt of the parent controller from which
+ it is chained.
+
+Example:
+
+axi_intc_0: interrupt-controller@41800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ reg = <0x41800000 0x10000>;
+ xlnx,kind-of-intr = <0x1>;
+ xlnx,num-intr-inputs = <0x1>;
+};
+
+Chained Example:
+
+The interrupt is chained to hardware interrupt 61 (29 + 32) of the GIC
+for Zynq.
+
+axi_intc_0: interrupt-controller@41800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ interrupt-parent = <&ps7_scugic_0>;
+ interrupts = <0 29 4>;
+ reg = <0x41800000 0x10000>;
+ xlnx,kind-of-intr = <0x1>;
+ xlnx,num-intr-inputs = <0x1>;
+};
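+
+A peripheral using this controller as its interrupt parent then uses a
+two-cell specifier in which the second cell is ignored. The node below is
+an illustrative sketch only; the uartlite address and interrupt number are
+assumptions, not taken from a real design:
+
+serial@40600000 {
+	compatible = "xlnx,xps-uartlite-1.00.a";
+	reg = <0x40600000 0x10000>;
+	interrupt-parent = <&axi_intc_0>;
+	interrupts = <4 2>;
+};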
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
new file mode 100644
index 000000000000..20460744b4c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
@@ -0,0 +1,122 @@
+Xilinx MIPI CSI2 Receiver Subsystem (CSI2RxSS)
+----------------------------------------------
+
+The Xilinx MIPI CSI2 Receiver Subsystem is used to capture MIPI CSI2 traffic
+from compliant camera sensors and send the output as AXI4-Stream video data
+for image processing. The subsystem consists of a MIPI DPHY in slave mode,
+which captures the data packets. These are passed along to the MIPI CSI2 IP,
+which extracts the packet data. This data is taken in by the Video Format
+Bridge (VFB), if selected, and converted into AXI4-Stream video data at the
+selected pixels per clock, as per the AXI4-Stream Video IP and System Design
+guide (UG934).
+
+For more details, please refer to PG232 MIPI CSI-2 Receiver Subsystem v2.0
+
+Required properties:
+
+- compatible: Must contain "xlnx,mipi-csi2-rx-subsystem-4.0". The older strings
+ "xlnx,mipi-csi2-rx-subsystem-2.0" and "xlnx,mipi-csi2-rx-subsystem-3.0" are
+ deprecated.
+
+- reg: Physical base address and length of the registers set for the device.
+
+- xlnx,max-lanes: Maximum active lanes in the design.
+
+- xlnx,en-active-lanes: Enable Active lanes configuration in Protocol
+ Configuration Register.
+
+- xlnx,dphy-present: Indicates whether the DPHY register interface is enabled
+  in the design.
+
+- xlnx,iic-present: Indicates whether the subsystem's IIC is present. This
+  affects the base address of the DPHY.
+
+- xlnx,vc: Virtual Channel, specifies virtual channel number to be filtered.
+ If this is 4 then all virtual channels are allowed.
+
+- xlnx,csi-pxl-format: This denotes the CSI Data type selected in hw design.
+ Packets other than this data type (except for RAW8 and User defined data
+ types) will be filtered out. Possible values are RAW6, RAW7, RAW8, RAW10,
+ RAW12, RAW14, RAW16, RAW20, RGB444, RGB555, RGB565, RGB666, RGB888 and YUV4228bit.
+
+- xlnx,vfb: Video Format Bridge. Denotes whether the Video Format Bridge is
+  selected, so that the output is an AXI4-Stream as documented in UG934.
+
+- xlnx,ppc: Pixels per clock, the number of pixels to be transferred per
+  pixel clock. This is valid only if the xlnx,vfb property is enabled.
+
+- xlnx,axis-tdata-width: AXI4-Stream data width. It depends on the data type
+  chosen, whether the Video Format Bridge is enabled or disabled, and the
+  pixels per clock. If the VFB is disabled, its value is either 0x20 (32-bit)
+  or 0x40 (64-bit).
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+  The CSI2 Rx Subsystem has two ports: one input port for connecting to the
+  camera sensor and one output port.
+
+- data-lanes: The number of data lanes through which CSI2 Rx Subsystem is
+ connected to the camera sensor as per video-interfaces.txt
+
+- clocks: List of phandles to AXI Lite, Video and 200 MHz DPHY clocks.
+
+- clock-names: Must contain "lite_aclk", "video_aclk" and "dphy_clk_200M" in
+ the same order as clocks listed in clocks property.
+
+Optional Properties
+
+- xlnx,en-vcx: When present, the maximum number of virtual channels is 16;
+  otherwise it is 4.
+
+Example:
+
+ csiss_1: csiss@a0020000 {
+ compatible = "xlnx,mipi-csi2-rx-subsystem-4.0";
+ reg = <0x0 0xa0020000 0x0 0x20000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 95 4>;
+
+ reset-gpios = <&gpio 81 1>;
+ xlnx,max-lanes = <0x4>;
+ xlnx,en-active-lanes;
+ xlnx,dphy-present;
+ xlnx,iic-present;
+ xlnx,vc = <0x4>;
+ xlnx,csi-pxl-format = "RAW8";
+ xlnx,vfb;
+ xlnx,ppc = <0x4>;
+ xlnx,axis-tdata-width = <0x20>;
+
+ clock-names = "lite_aclk", "dphy_clk_200M", "video_aclk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+ csiss_out: endpoint {
+ remote-endpoint = <&vcap_csiss_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ csiss_in: endpoint {
+ data-lanes = <1 2 3 4>;
+ /* MIPI CSI2 Camera handle */
+ remote-endpoint = <&vs2016_out>;
+ };
+
+ };
+
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt
new file mode 100644
index 000000000000..73af77faeb20
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt
@@ -0,0 +1,25 @@
+Xilinx Video IP MEM2MEM Pipeline (XVIM2M)
+----------------------------------------
+
+The Xilinx video IP mem2mem pipeline uses DMA transfers to copy data from
+one physical memory location to another. The data is copied by employing
+two DMA transfers, a memory-to-device and a device-to-memory transaction,
+one after the other. The DT node of the XVIM2M is the top level node of
+the pipeline and defines the mappings between the DMAs.
+
+Required properties:
+
+- compatible: Must be "xlnx,mem2mem".
+
+- dmas, dma-names: List of two DMA specifiers and their identifier strings
+  (as defined in Documentation/devicetree/bindings/dma/dma.txt). The
+  identifier string of one DMA channel should be "tx" and the other should
+  be "rx".
+
+Example:
+
+ video_m2m {
+ compatible = "xlnx,mem2mem";
+ dmas = <&dma_1 0>, <&dma_2 0>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
new file mode 100644
index 000000000000..1e38b7d5c5a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
@@ -0,0 +1,66 @@
+Xilinx SDI Receiver Subsystem
+------------------------------
+
+The Xilinx SDI Rx Subsystem is used to capture SDI video in up to 12G mode.
+It outputs the video as AXI4-Stream video data in YUV 422 10bpc mode.
+The subsystem consists of the SDI Rx IP, whose SDI native output is connected
+to an SDI-to-Native conversion bridge. The output of the Native bridge is
+connected to a Native-to-AXI4S bridge, which generates the AXI4-Stream of
+YUV422 or YUV420 10 bpc at two pixels per clock.
+
+Required properties:
+
+- compatible: Must contain "xlnx,v-smpte-uhdsdi-rx-ss"
+
+- reg: Physical base address and length of the registers set for the device.
+
+- interrupts: Contains the interrupt line number.
+
+- interrupt-parent: phandle to interrupt controller.
+
+- xlnx,include-edh: Whether the EDH processor is enabled in design or not.
+
+- xlnx,line-rate: The maximum mode supported by the design.
+
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+- clock-names: List of input clocks.
+ Required elements: "s_axi_aclk", "sdi_rx_clk", "video_out_clk"
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+ The SDI Rx subsystem has one port configured as output port.
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt. Please note that the video format is fixed to either YUV422 or YUV420
+ and the video-width is 10.
+
+Example:
+ v_smpte_uhdsdi_rx_ss: v_smpte_uhdsdi_rx_ss@80000000 {
+ compatible = "xlnx,v-smpte-uhdsdi-rx-ss";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,include-axilite = "true";
+ xlnx,include-edh = "true";
+ xlnx,include-vid-over-axi = "true";
+ xlnx,line-rate = "12G_SDI_8DS";
+ clocks = <&clk_1>, <&si570_1>, <&clk_2>;
+ clock-names = "s_axi_aclk", "sdi_rx_clk", "video_out_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <10>;
+
+ sdirx_out: endpoint {
+ remote-endpoint = <&vcap_sdirx_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
new file mode 100644
index 000000000000..fb5ed47d959a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
@@ -0,0 +1,141 @@
+Xilinx AXI4-Stream Switch
+-------------------------------
+
+The AXI4-Stream Switch provides configurable routing between masters and slaves.
+It supports up to 16 masters/sources and 16 slaves/sinks, and two routing
+options. There is at least one slave/sink port and two master/source ports.
+
+The two routing options available are TDEST routing and control register routing.
+The TDEST based routing uses design parameters and hence there is no software control.
+Each port is mapped as a pad and has its own format specified.
+
+Control register routing introduces an AXI4-Lite interface to configure the
+routing table. There is one register for each of the master interfaces to
+control each of the selectors. This routing mode requires that there is
+precisely only one path between master and slave. When attempting to map the
+same slave interface to multiple master interfaces, only the lowest master
+interface is able to access the slave interface.
+Here only the slave/sink ports have formats as master/source ports will inherit
+the corresponding slave ports formats. A routing table is maintained in this case.
+
+Please refer to PG085 AXI4-Stream Infrastructure IP Suite v2.2 for more details.
+
+Required properties:
+
+ - compatible: Must be "xlnx,axis-switch-1.1".
+ - xlnx,routing-mode: Can be 0 (TDEST routing) or 1 (Control reg routing)
+ - xlnx,num-si-slots: Number of slave / input ports. Min 1, Max 16.
+ - xlnx,num-mi-slots: Number of master / output ports. Min 1, Max 16.
+ - ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ - clocks: Reference to the AXI Streaming clock feeding the ACLK and
+ AXI4 Lite control interface clock when control routing is enabled.
+ - clock-names: Must have "aclk".
+
+Optional properties:
+ - reg: Physical base address and length of the registers set for the device.
+ This is required only if xlnx,routing-mode is 1.
+ - clocks: Reference to AXI4 Lite control interface clock when routing-mode is 1.
+ - clock-names: "s_axi_ctl_clk" clock for AXI4 Lite interface when routing-mode is 1.
+
+Example:
+
+For TDEST routing, from 1 slave port to 4 master ports
+
+ axis_switch_0: axis_switch@0 {
+ compatible = "xlnx,axis-switch-1.1";
+ xlnx,routing-mode = <0x0>;
+ xlnx,num-si-slots = <0x1>;
+ xlnx,num-mi-slots = <0x4>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "aclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&csirxss_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_out0: endpoint {
+ remote-endpoint = <&vcap_csirxss0_in>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap_csirxss1_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out2: endpoint {
+ remote-endpoint = <&vcap_csirxss2_in>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ switch_out3: endpoint {
+ remote-endpoint = <&vcap_csirxss3_in>;
+ };
+ };
+ };
+
+ };
+
+For Control reg based routing, from 2 slave ports to 4 master ports
+
+ axis_switch_0: axis_switch@a0050000 {
+ compatible = "xlnx,axis-switch-1.1";
+ reg = <0x0 0xa0050000 0x0 0x1000>;
+ xlnx,routing-mode = <0x1>;
+ xlnx,num-si-slots = <0x2>;
+ xlnx,num-mi-slots = <0x4>;
+ clocks = <&vid_stream_clk>, <&misc_clk_0>;
+		clock-names = "aclk", "s_axi_ctl_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&csirxss_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_in1: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out0: endpoint {
+ remote-endpoint = <&vcap_csirxss0_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap_csirxss1_in>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ switch_out2: endpoint {
+ remote-endpoint = <&vcap_csirxss2_in>;
+ };
+ };
+ port@5 {
+ reg = <5>;
+ switch_out3: endpoint {
+ remote-endpoint = <&vcap_csirxss3_in>;
+ };
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt
new file mode 100644
index 000000000000..cdb0886cf975
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt
@@ -0,0 +1,58 @@
+Xilinx Color Filter Array (CFA)
+-------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-cfa-7.0".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The cfa has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be SENSOR_MONO for the input port (0), and RBG for
+ the output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+- xlnx,cfa-pattern: Must be one of "rggb", "grbg", "gbrg" and "bggr" for the
+ input port (0). Must not be specified for the output port (1).
+
+Example:
+
+ cfa_0: cfa@400b0000 {
+ compatible = "xlnx,v-cfa-7.0";
+ reg = <0x400b0000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_SENSOR_MONO>;
+ xlnx,video-width = <8>;
+ xlnx,cfa-pattern = "rggb";
+
+ cfa0_in: endpoint {
+ remote-endpoint = <&spc0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ cfa0_out: endpoint {
+ remote-endpoint = <&ccm0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt
new file mode 100644
index 000000000000..f404ee301272
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt
@@ -0,0 +1,54 @@
+Xilinx Chroma Resampler (CRESAMPLE)
+-----------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-cresample-4.0".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+  The cresample has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be one of YUV_444, YUV_422 or YUV_420 for the input
+ port (0), and one of YUV_422 or YUV_420 for the output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ cresample_0: cresample@40120000 {
+ compatible = "xlnx,v-cresample-4.0";
+ reg = <0x40120000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_444>;
+ xlnx,video-width = <8>;
+
+ cresample0_in: endpoint {
+ remote-endpoint = <&rgb2yuv0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ cresample0_out: endpoint {
+ remote-endpoint = <&scaler0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
new file mode 100644
index 000000000000..9b3aff413e0e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
@@ -0,0 +1,62 @@
+Xilinx Video Demosaic IP
+-----------------------------
+The Xilinx Video Demosaic IP is used to interface to a Bayer video source.
+
+The driver sets the default Sink Pad media bus format to RGGB.
+The IP and driver only support RGB as the Source Pad media format.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-demosaic".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the AXI Streaming clock feeding the Demosaic ap_clk.
+
+- xlnx,max-height: Maximum number of lines. Valid range is 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line. Valid range is 64 to 8192.
+
+- reset-gpios: Specifier for GPIO that asserts Demosaic IP (AP_RST_N) reset.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+
+Required port properties:
+
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+ demosaic_1: demosaic@a00b0000 {
+ compatible = "xlnx,v-demosaic";
+ reg = <0x0 0xa00b0000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 87 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ xlnx,video-width = <8>;
+
+ demosaic_in: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ xlnx,video-width = <8>;
+
+ demosaic_out: endpoint {
+ remote-endpoint = <&gamma_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
new file mode 100644
index 000000000000..7bd750f009b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
@@ -0,0 +1,63 @@
+Xilinx Video Gamma Correction IP
+-----------------------------------
+The Xilinx Video Gamma Correction IP is used to provide RGB gamma correction.
+The IP provides a look up table for each R,G and B components.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-gamma-lut".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the clock that drives the ap_clk
+ signal of Video Gamma Lookup.
+
+- xlnx,max-height: Maximum number of lines. Valid range is 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line. Valid range is 64 to 8192.
+
+- reset-gpios: Specifier for a GPIO that asserts Gamma IP (AP_RST_N) reset
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The Gamma LUT IP has an input port (0) and an output port (1).
+
+
+Required port properties:
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt. Can be either 8 or 10.
+
+Example:
+
+	gamma_lut_1: gamma_lut_1@a0080000 {
+ compatible = "xlnx,v-gamma-lut";
+ reg = <0x0 0xa0080000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 83 1>;
+ xlnx,max-height = <2160>;
+ xlnx,max-width = <3840>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ xlnx,video-width = <8>;
+
+ gamma_in: endpoint {
+ remote-endpoint = <&demosaic_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ xlnx,video-width = <8>;
+
+ gamma_out: endpoint {
+ remote-endpoint = <&csc_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt
new file mode 100644
index 000000000000..a6db3040565a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt
@@ -0,0 +1,64 @@
+Xilinx High-Level Synthesis Core (HLS)
+--------------------------------------
+
+High-Level Synthesis cores are synthesized from a high-level function
+description developed by the user. As such their functions vary widely, but
+they all share a set of common characteristics that allow them to be described
+by common bindings.
+
+
+Required properties:
+
+- compatible: This property must contain "xlnx,v-hls" to indicate that the
+ core is compatible with the generic Xilinx HLS DT bindings. It can also
+ contain a more specific string to identify the HLS core implementation. The
+ value of those implementation-specific strings is out of scope for these DT
+ bindings.
+
+- reg: Physical base address and length of the registers sets for the device.
+ The HLS core has two registers sets, the first one contains the core
+ standard registers and the second one contains the custom user registers.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The HLS core has one input port (0) and one output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Video format as defined in video.txt.
+- xlnx,video-width: Video width as defined in video.txt.
+
+Example:
+
+ hls_0: hls@43c00000 {
+ compatible = "xlnx,v-hls-sobel", "xlnx,v-hls";
+ reg = <0x43c00000 0x24>, <0x43c00024 0xa0>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ hls0_in: endpoint {
+ remote-endpoint = <&vdma_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ hls0_out: endpoint {
+ remote-endpoint = <&vdma_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt
new file mode 100644
index 000000000000..3aea1f36a6ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt
@@ -0,0 +1,95 @@
+Xilinx mem2mem Multi Video Scaler (XM2MSC)
+-----------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,v-multi-scaler-v1.0"
+- clocks : Input clock specifier. Refer to common clk bindings.
+- interrupt-parent : Interrupt controller the interrupt is routed through
+- interrupts : Should contain MultiScaler interrupt
+- reset-gpios : Should contain GPIO reset phandle
+- reg : Physical base address and
+ length of the registers set for the device.
+- xlnx,max-chan : Maximum number of supported scaling channels (1 - 8)
+- xlnx,max-width : Maximum number of supported column/width (64 - 3840)
+- xlnx,max-height : Maximum number of supported row/height (64 - 2160)
+- xlnx,dma-addr-width : dma address width (either 32 or 64)
+- xlnx,pixels-per-clock : pixels per clock set in IP (1, 2 or 4)
+- xlnx,vid-formats : A list of strings indicating what video memory
+ formats the IP has been configured to support.
+ See VIDEO FORMATS table below and examples.
+- xlnx,num-taps : The number of filter taps for scaling (6, 8, 10, 12)
+
+VIDEO FORMATS
+The following table describes the legal string values to be used for
+the xlnx,vid-formats property. To the left is the string value and the
+column to the right describes the format.
+
+IP FORMAT DTS String Description
+-------------|----------------|---------------------
+RGB8 bgr888 Packed RGB, 8 bits per component.
+ Every RGB pixel in memory is represented with
+ 24 bits.
+RGBX8 xbgr8888 Packed RGB, 8 bits per component. Every RGB
+ pixel in memory is represented with 32 bits.
+ Bits[31:24] do not contain pixel information.
+BGRX8 xrgb8888 Packed BGR, 8 bits per component. Every BGR
+ pixel in memory is represented with 32 bits.
+ Bits[31:24] do not contain pixel information.
+RGBX10 xbgr2101010 Packed RGB, 10 bits per component. Every RGB
+ pixel is represented with 32 bits. Bits[31:30]
+ do not contain any pixel information.
+YUV8 vuy888 Packed YUV 4:4:4, 8 bits per component. Every
+ YUV 4:4:4 pixel in memory is represented with
+ 24 bits.
+YUVX8 xvuy8888 Packed YUV 4:4:4, 8 bits per component.
+ Every YUV 4:4:4 pixel in memory is represented
+ with 32 bits. Bits[31:24] do not contain pixel
+ information.
+YUYV8 yuyv Packed YUV 4:2:2, 8 bits per component. Every
+ two YUV 4:2:2 pixels in memory are represented
+ with 32 bits.
+UYVY8 uyvy Packed YUV 4:2:2, 8 bits per component.
+ Every two YUV 4:2:2 pixels in memory are
+ represented with 32 bits.
+YUVX10 yuvx2101010 Packed YUV 4:4:4, 10 bits per component.
+ Every YUV 4:4:4 pixel is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+Y8 y8 Packed Luma-Only, 8 bits per component. Every
+ luma-only pixel in memory is represented with
+ 8 bits. Y8 is presented as YUV 4:4:4 on the
+ AXI4-Stream interface.
+Y10 y10 Packed Luma-Only, 10 bits per component. Every
+                              three luma-only pixels in memory are represented
+ with 32 bits. Y10 is presented as YUV 4:4:4 on
+ the AXI4-Stream interface.
+Y_UV8 nv16 Semi-planar YUV 4:2:2 with 8 bits per component.
+ Y and UV stored in separate planes.
+Y_UV8_420 nv12 Semi-planar YUV 4:2:0 with 8 bits per component.
+ Y and UV stored in separate planes.
+Y_UV10 xv20 Semi-planar YUV 4:2:2 with 10 bits per component.
+                              Every 3 pixels are represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+ Y and UV stored in separate planes.
+Y_UV10_420 xv15 Semi-planar YUV 4:2:0 with 10 bits per component.
+                              Every 3 pixels are represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+ Y and UV stored in separate planes.
+
+Example
+
+v_multi_scaler_0: v_multi_scaler@a0000000 {
+ clocks = <&clk 71>;
+ compatible = "xlnx,v-multi-scaler-v1.0";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ xlnx,vid-formats = "bgr888","vuy888";
+ reset-gpios = <&gpio 78 1>;
+ xlnx,max-chan = <0x01>;
+ xlnx,dma-addr-width = <0x20>;
+ xlnx,pixels-per-clock = /bits/ 8 <2>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ xlnx,num-taps = <6>;
+};
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt
new file mode 100644
index 000000000000..cda02cb97a21
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt
@@ -0,0 +1,61 @@
+Xilinx Video Remapper
+---------------------
+
+The IP core remaps input pixel components to produce an output pixel with
+less, more or the same number of components as the input pixel.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-remapper".
+
+- clocks: Reference to the video core clock.
+
+- xlnx,video-width: Video pixel component width, as defined in video.txt.
+
+- #xlnx,s-components: Number of components per pixel at the input port
+ (between 1 and 4 inclusive).
+
+- #xlnx,m-components: Number of components per pixel at the output port
+ (between 1 and 4 inclusive).
+
+- xlnx,component-maps: Remapping configuration represented as an array of
+ integers. The array contains one entry per output component, in the low to
+ high order. Each entry corresponds to the zero-based position of the
+ corresponding input component, or the value 4 to drive a constant value on
+ the output component. For example, to remap RGB to BGR use <2 1 0>, and to
+ remap RBG to xRGB use <1 0 2 4>.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+  The remapper has an input port (0) and an output port (1).
+
+Example: RBG to xRGB remapper
+
+ remapper_0: remapper {
+ compatible = "xlnx,v-remapper";
+
+ clocks = <&clkc 15>;
+
+ xlnx,video-width = <8>;
+
+ #xlnx,s-components = <3>;
+ #xlnx,m-components = <4>;
+ xlnx,component-maps = <1 0 2 4>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ remap0_in: endpoint {
+ remote-endpoint = <&tpg0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ remap0_out: endpoint {
+ remote-endpoint = <&sobel0_in>;
+ };
+ };
+ };
+ };
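+
+Example: RGB to BGR remapper (an illustrative sketch; the clock phandle
+and the remote endpoints are assumptions):
+
+	remapper_1: remapper-bgr {
+		compatible = "xlnx,v-remapper";
+
+		clocks = <&clkc 15>;
+
+		xlnx,video-width = <8>;
+
+		#xlnx,s-components = <3>;
+		#xlnx,m-components = <3>;
+		xlnx,component-maps = <2 1 0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				remap1_in: endpoint {
+					remote-endpoint = <&tpg1_out>;
+				};
+			};
+			port@1 {
+				reg = <1>;
+				remap1_out: endpoint {
+					remote-endpoint = <&sobel1_in>;
+				};
+			};
+		};
+	};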
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt
new file mode 100644
index 000000000000..ecd10fb31ac1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt
@@ -0,0 +1,54 @@
+Xilinx RGB to YUV (RGB2YUV)
+---------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-rgb2yuv-7.1".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The rgb2yuv has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be RBG for the input port (0) and YUV_444 for the
+ output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ rgb2yuv_0: rgb2yuv@40100000 {
+ compatible = "xlnx,v-rgb2yuv-7.1";
+ reg = <0x40100000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ rgb2yuv0_in: endpoint {
+ remote-endpoint = <&gamma0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_444>;
+ xlnx,video-width = <8>;
+
+ rgb2yuv0_out: endpoint {
+ remote-endpoint = <&cresample0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt
new file mode 100644
index 000000000000..0bb9c405f5ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt
@@ -0,0 +1,75 @@
+Xilinx Scaler (SCALER)
+------------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-scaler-8.1".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- xlnx,num-hori-taps, xlnx,num-vert-taps: The number of horizontal and vertical
+  taps for the scaling filter (range: 2 - 12).
+
+- xlnx,max-num-phases: The maximum number of phases for scaling filter
+ (range: 2 - 64).
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Optional properties:
+
+- xlnx,separate-yc-coef: When set, this boolean property specifies that
+ the hardware uses separate coefficients for the luma and chroma filters.
+ Otherwise a single set of coefficients is shared for both.
+
+- xlnx,separate-hv-coef: When set, this boolean property specifies that
+ the hardware uses separate coefficients for the horizontal and vertical
+ filters. Otherwise a single set of coefficients is shared for both.
+
+Required port properties:
+
+- xlnx,video-format: Must be one of RBG, YUV_422 or YUV_420 for
+  both the input port (0) and the output port (1). The two formats must be
+  identical.
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ scaler_0: scaler@43c30000 {
+ compatible = "xlnx,v-scaler-8.1";
+ reg = <0x43c30000 0x10000>;
+ clocks = <&clkc 15>;
+
+ xlnx,num-hori-taps = <12>;
+ xlnx,num-vert-taps = <12>;
+ xlnx,max-num-phases = <4>;
+ xlnx,separate-hv-coef;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler0_in: endpoint {
+ remote-endpoint = <&cresample0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler0_out: endpoint {
+ remote-endpoint = <&vcap0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt
new file mode 100644
index 000000000000..a05e9712c833
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt
@@ -0,0 +1,164 @@
+Xilinx Scene Change Detection IP (SCD)
+--------------------------------------
+
+The Xilinx Scene Change Detection IP contains two blocks: one IP block is used
+for reading video frame data from memory to the device, and the other IP block
+is used for determining whether there is a scene change between the current
+and the previous frame. The IP supports YUV planar and semi-planar formats,
+and only needs the luma frame to determine a scene change event. The IP
+supports a memory based model: it accepts a DMA buffer address, performs a
+MEM2DEV transfer followed by statistics-based image processing, and reports
+back to the application whether a scene change was detected.
+
+Another version of the scene change detection IP supports a streaming model,
+which means that the IP can be inserted in a capture pipeline. For example,
+"hdmirx -> streaming-scd -> fb_wr" is a typical capture pipeline where the
+streaming SCD can be embedded. The IP accepts the AXI video data and performs
+histogram based statistical analysis to detect scene changes. This version
+supports a single channel.
+
+Required properties:
+
+- compatible: Should be "xlnx,v-scd"
+
+- reg: Physical base address and length of the registers set for the device
+
+- clocks: Reference to the video core clock.
+
+- reset-gpios: Specifier for a GPIO that asserts the SCD (AP_RST_N) reset.
+
+- xlnx,memory-based: Boolean property that differentiates between the memory
+  based and streaming based IP. It is present for the memory based IP and
+  absent for the streaming based IP, as shown in the examples below.
+
+- xlnx,numstreams: Maximum number of active streams the IP can support,
+  up to 8, as determined by the design.
+
+- xlnx,addrwidth: Size of dma address pointer in IP (either 32 or 64)
+
+- subdev: Each channel will have its own subdev node. Each subdev will have its
+ sink port.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+
+Example:
+
+1. Memory based device tree
+
+The following example shows how the device tree would look like for a memory
+based design where 8 streams are enabled.
+
+ scd: scenechange@a0100000 {
+ compatible = "xlnx,v-scd";
+ reg = <0x0 0xa0100000 0x0 0x1fff>;
+ clocks = <&misc_clk_0>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reset-gpios = <&gpio 94 1>;
+
+ xlnx,memory-based;
+ xlnx,numstreams = <8>;
+ xlnx,addrwidth = <0x20>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #dma-cells = <1>;
+
+ subdev@0 {
+ port@0 {
+ reg = <0>;
+ scd_in0: endpoint {
+ remote-endpoint = <&vcap0_out0>;
+ };
+ };
+ };
+ subdev@1 {
+ port@0 {
+ reg = <0>;
+ scd_in1: endpoint {
+ remote-endpoint = <&vcap0_out1>;
+ };
+ };
+ };
+ subdev@2 {
+ port@0 {
+ reg = <0>;
+ scd_in2: endpoint {
+ remote-endpoint = <&vcap0_out2>;
+ };
+ };
+ };
+ subdev@3 {
+ port@0 {
+ reg = <0>;
+ scd_in3: endpoint {
+ remote-endpoint = <&vcap0_out3>;
+ };
+ };
+ };
+ subdev@4 {
+ port@0 {
+ reg = <0>;
+ scd_in4: endpoint {
+ remote-endpoint = <&vcap0_out4>;
+ };
+ };
+ };
+ subdev@5 {
+ port@0 {
+ reg = <0>;
+ scd_in5: endpoint {
+ remote-endpoint = <&vcap0_out5>;
+ };
+ };
+ };
+ subdev@6 {
+ port@0 {
+ reg = <0>;
+ scd_in6: endpoint {
+ remote-endpoint = <&vcap0_out6>;
+ };
+ };
+ };
+ subdev@7 {
+ port@0 {
+ reg = <0>;
+ scd_in7: endpoint {
+ remote-endpoint = <&vcap0_out7>;
+ };
+ };
+ };
+ };
+
+2. Streaming based device tree
+
+The following example shows how the device tree would look like for a streaming
+based design.
+
+ scd: scenechange@a0280000 {
+ compatible = "xlnx,v-scd";
+ reg = <0x0 0xa0280000 0x0 0x1fff>;
+ clocks = <&clk 72>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 111 4>;
+ reset-gpios = <&gpio 100 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,numstreams = <1>;
+
+ scd {
+ port@0 {
+ reg = <0x0>;
+ scd_in0: endpoint {
+ remote-endpoint = <&vpss_scaler_out>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+ scd_out0: endpoint {
+ remote-endpoint = <&vcap_hdmi_in_1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt
new file mode 100644
index 000000000000..91dc3af4a2b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt
@@ -0,0 +1,55 @@
+Xilinx Video Switch
+-------------------
+
+Required properties:
+
+ - compatible: Must be "xlnx,v-switch-1.0".
+
+ - reg: Physical base address and length of the registers set for the device.
+
+ - clocks: Reference to the video core clock.
+
+ - #xlnx,inputs: Number of input ports
+ - #xlnx,outputs: Number of outputs ports
+
+ - ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+
+Example:
+
+ switch: switch@43c10000 {
+ compatible = "xlnx,v-switch-1.0";
+ reg = <0x43c10000 0x10000>;
+ clocks = <&clkc 15>;
+
+ #xlnx,inputs = <2>;
+ #xlnx,outputs = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_in1: endpoint {
+ remote-endpoint = <&cresample0_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out0: endpoint {
+ remote-endpoint = <&scaler0_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap0_in1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
index 439351ab2a79..4b2126a78a3f 100644
--- a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
@@ -6,7 +6,8 @@ Required properties:
- compatible: Must contain at least one of
"xlnx,v-tpg-5.0" (TPG version 5.0)
- "xlnx,v-tpg-6.0" (TPG version 6.0)
+ "xlnx,v-tpg-7.0" (TPG version 7.0)
+ "xlnx,v-tpg-8.0" (TPG version 8.0)
TPG versions backward-compatible with previous versions should list all
compatible versions in the newer to older order.
@@ -23,6 +24,8 @@ Required properties:
Optional properties:
+- xlnx,ppc: Pixels per clock. Valid values are 1, 2, 4 or 8.
+
- xlnx,vtc: A phandle referencing the Video Timing Controller that generates
video timings for the TPG test patterns.
@@ -30,16 +33,26 @@ Optional properties:
input. The GPIO active level corresponds to the selection of VTC-generated
video timings.
+- reset-gpios: Specifier for a GPIO that asserts the TPG (AP_RST_N) reset.
+ This property is mandatory for TPG v7.0 and above.
+
+- xlnx,max-height: Maximum number of lines.
+ This property is mandatory for TPG v8.0. Value ranges from 64 to 7760.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ This property is mandatory for TPG v8.0. Value ranges from 64 to 10328.
+
The xlnx,vtc and timing-gpios properties are mandatory when the TPG is
synthesized with two ports and forbidden when synthesized with one port.
Example:
tpg_0: tpg@40050000 {
- compatible = "xlnx,v-tpg-6.0", "xlnx,v-tpg-5.0";
+ compatible = "xlnx,v-tpg-5.0";
reg = <0x40050000 0x10000>;
clocks = <&clkc 15>;
+ xlnx,ppc = <2>;
xlnx,vtc = <&vtc_3>;
timing-gpios = <&ps7_gpio_0 55 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
new file mode 100644
index 000000000000..b3627af85e6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
@@ -0,0 +1,66 @@
+Xilinx VPSS Color Space Converter (CSC)
+-----------------------------------------
+The Xilinx VPSS Color Space Converter (CSC) is a Video IP that supports
+color space conversion from RGB input to YUV output.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-vpss-csc".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the clock that drives the ap_clk signal.
+
+- xlnx,max-height: Maximum number of lines.
+ Valid range from 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- reset-gpios: Specifier for a GPIO that asserts the VPSS CSC (AP_RST_N) reset.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+  The CSC has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be XVIP_VF_RBG, XVIP_VF_YUV_444 or XVIP_VF_YUV_422
+ for input port (0) and XVIP_VF_RBG, XVIP_VF_YUV_444 or XVIP_VF_YUV_422
+ for output port (1). See <dt-bindings/media/xilinx-vip.h> for more details.
+
+- xlnx,video-width: Video width as defined in video.txt. Must be either 8 or 10.
+
+Example:
+ csc_1:csc@a0040000 {
+ compatible = "xlnx,v-vpss-csc";
+ reg = <0x0 0xa0040000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 84 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Sink Pad */
+ port@0 {
+ reg = <0>;
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ csc_in: endpoint {
+ remote-endpoint = <&gamma_out>;
+ };
+ };
+ /* Source Pad */
+ port@1 {
+ reg = <1>;
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ csc_out: endpoint {
+				remote-endpoint = <&scaler_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
new file mode 100644
index 000000000000..c29b5f487dbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
@@ -0,0 +1,93 @@
+Xilinx VPSS Scaler
+------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-vpss-scaler-1.0".
+ The older string "xlnx,v-vpss-scaler" will be deprecated.
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the AXI Streaming clock feeding the VPSS Scaler AP_CLK
+ and AXI4 Lite control interface clock.
+
+- clock-names: Must contain "aclk_axis" and "aclk_ctrl" in the same order as
+ clocks listed in clocks property.
+
+- xlnx,num-hori-taps, xlnx,num-vert-taps: The number of horizontal and vertical
+  taps for the scaling filter (range: 2, 4, 6, 8, 10, 12).
+
+  A value of 2 represents bilinear filters. A value of 4 represents bicubic.
+  Values 6, 8, 10 and 12 represent polyphase filters.
+
+- xlnx,pix-per-clk : The pixels per clock property of the IP.
+
+- reset-gpios: Specifier for a GPIO that asserts the VPSS Scaler reset.
+  This property is mandatory for the Scaler.
+
+- xlnx,max-height: Maximum number of lines.
+ Valid range from 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be one of XVIP_VF_RBG or XVIP_VF_YUV_422 for
+  the input port (0) and must be XVIP_VF_RBG or XVIP_VF_YUV_422 for
+  the output port (1).
+ See <dt-bindings/media/xilinx-vip.h> for more details.
+
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ scaler_1:scaler@a0000000 {
+ compatible = "xlnx,v-vpss-scaler-1.0";
+ reg = <0x0 0xa0000000 0x0 0x40000>;
+ clocks = <&vid_stream_clk>, <&misc_clk_2>;
+ clock-names = "aclk_axis", "aclk_ctrl";
+ xlnx,num-hori-taps = <8>;
+ xlnx,num-vert-taps = <8>;
+ xlnx,pix-per-clk = <2>;
+ reset-gpios = <&gpio 87 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ /* Sink Pad */
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ scaler_in: endpoint {
+ remote-endpoint = <&csc_out>;
+ };
+ };
+
+ port@1 {
+ /* Source Pad */
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler_out: endpoint {
+ remote-endpoint = <&vcap_tpg_in>;
+ };
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/misc/jesd-phy.txt b/Documentation/devicetree/bindings/misc/jesd-phy.txt
new file mode 100644
index 000000000000..84535cb1e905
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/jesd-phy.txt
@@ -0,0 +1,24 @@
+* Xilinx JESD204B Phy
+
+Description:
+The LogiCORE™ IP JESD204 PHY core implements a JESD204B Physical interface supporting
+line rates between 1.0 and 12.5 Gb/s on 1 to 12 lanes using GTX, GTH, or GTP transceivers.
+
+Required properties:
+- compatible = "xlnx,jesd204-phy-2.0"
+- reg = Should contain JESD204B phy registers location and length
+- xlnx,pll-selection = The PLL selection: 3 for QPLL and 1 for CPLL
+- xlnx,lanes = Number of lanes
+- xlnx,gt-refclk-freq = Reference clock frequency in Hz
+- clocks = The phandle to the clock tree
+
+Example:
+++++++++
+ jesd204_phycores:phy@41e10000 {
+ compatible = "xlnx,jesd204-phy-2.0";
+ reg = <0x41e10000 0x10000>;
+ xlnx,gt-refclk-freq = "156250000";
+ xlnx,lanes = <0x1>;
+ xlnx,pll-selection = <0x3>;
+ clocks = <&si570>;
+ };
diff --git a/Documentation/devicetree/bindings/misc/jesd204b.txt b/Documentation/devicetree/bindings/misc/jesd204b.txt
new file mode 100644
index 000000000000..53f8192c8afa
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/jesd204b.txt
@@ -0,0 +1,28 @@
+* Xilinx JESD204B core
+
+Description:
+The LogiCORE™ IP JESD204 core implements a JESD204B link layer core.
+
+Required properties:
+- compatible = Should be one of
+ "xlnx,jesd204-5.1";
+ "xlnx,jesd204-5.2";
+ "xlnx,jesd204-6.1";
+- reg = Should contain JESD204B registers location and length
+- xlnx,frames-per-multiframe = Number of frames per multiframe
+- xlnx,bytes-per-frame = Number of bytes per frame
+- xlnx,lanes = Number of lanes
+- xlnx,subclass = The JESD204B subclass
+- xlnx,node-is-transmit = Should be present only for transmit nodes
+
+Example:
+++++++++
+jesd_Tx_axi_0: jesd_Tx@44a20000 {
+ compatible = "xlnx,jesd204-5.1";
+ reg = <0x44a20000 0x10000>;
+ xlnx,frames-per-multiframe = <30>;
+ xlnx,bytes-per-frame = <2>;
+ xlnx,subclass = <1>;
+ xlnx,lanes = <0x2>;
+ xlnx,node-is-transmit;
+};
diff --git a/Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt b/Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt
new file mode 100644
index 000000000000..6edb8f6a3a10
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xilinx-axitrafgen.txt
@@ -0,0 +1,25 @@
+* Xilinx AXI Traffic generator IP
+
+Required properties:
+- compatible: "xlnx,axi-traffic-gen"
+- interrupts: Should contain AXI Traffic Generator interrupts.
+- interrupt-parent: Must be core interrupt controller.
+- reg: Should contain AXI Traffic Generator registers location and length.
+- interrupt-names: Should contain the interrupt names of the device:
+  error and completion.
+- xlnx,device-id: Device instance Id.
+
+Optional properties:
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+Example:
+++++++++
+axi_traffic_gen_1: axi-traffic-gen@76000000 {
+ compatible = "xlnx,axi-traffic-gen-1.0", "xlnx,axi-traffic-gen";
+ clocks = <&clkc 15>;
+ interrupts = <0 2 2 2>;
+ interrupt-parent = <&axi_intc_1>;
+ interrupt-names = "err-out", "irq-out";
+ reg = <0x76000000 0x800000>;
+ xlnx,device-id = <0x0>;
+};
diff --git a/Documentation/devicetree/bindings/misc/xlnx,fclk.txt b/Documentation/devicetree/bindings/misc/xlnx,fclk.txt
new file mode 100644
index 000000000000..e1a1acc6c5ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xlnx,fclk.txt
@@ -0,0 +1,12 @@
+* Xilinx fclk clock enable
+Temporary solution for enabling the PS_PL clocks.
+
+Required properties:
+- compatible: "xlnx,fclk"
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+Example:
+++++++++
+fclk0: fclk0 {
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+};
diff --git a/Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt b/Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt
new file mode 100644
index 000000000000..c1c67cbf9935
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt
@@ -0,0 +1,58 @@
+* Xilinx SDFEC(16nm) IP *
+
+The Soft Decision Forward Error Correction (SDFEC) Engine is a Hard IP block
+which provides high-throughput LDPC and Turbo Code implementations.
+The LDPC decode & encode functionality is capable of covering a range of
+customer specified Quasi-cyclic (QC) codes. The Turbo decode functionality
+principally covers codes used by LTE. The FEC Engine offers significant
+power and area savings versus implementations done in the FPGA fabric.
+
+
+Required properties:
+- compatible: Must be "xlnx,sd-fec-1.1"
+- clock-names : List of input clock names from the following:
+ - "core_clk", Main processing clock for processing core (required)
+ - "s_axi_aclk", AXI4-Lite memory-mapped slave interface clock (required)
+ - "s_axis_din_aclk", DIN AXI4-Stream Slave interface clock (optional)
+  - "s_axis_din_words_aclk", DIN_WORDS AXI4-Stream Slave interface clock (optional)
+ - "s_axis_ctrl_aclk", Control input AXI4-Stream Slave interface clock (optional)
+ - "m_axis_dout_aclk", DOUT AXI4-Stream Master interface clock (optional)
+ - "m_axis_dout_words_aclk", DOUT_WORDS AXI4-Stream Master interface clock (optional)
+ - "m_axis_status_aclk", Status output AXI4-Stream Master interface clock (optional)
+- clocks : Clock phandles (see clock_bindings.txt for details).
+- reg: Should contain Xilinx SDFEC 16nm Hardened IP block registers
+ location and length.
+- xlnx,sdfec-code : Should contain "ldpc" or "turbo" to describe the codes
+ being used.
+- xlnx,sdfec-din-words : A value of 0 indicates that the DIN_WORDS interface is
+ driven with a fixed value and is not present on the device, a value of 1
+ configures the DIN_WORDS to be block based, while a value of 2 configures the
+ DIN_WORDS input to be supplied for each AXI transaction.
+- xlnx,sdfec-din-width : Configures the DIN AXI stream where a value of 1
+ configures a width of "1x128b", 2 a width of "2x128b" and 4 configures a width
+ of "4x128b".
+- xlnx,sdfec-dout-words : A value of 0 indicates that the DOUT_WORDS interface is
+ driven with a fixed value and is not present on the device, a value of 1
+ configures the DOUT_WORDS to be block based, while a value of 2 configures the
+ DOUT_WORDS input to be supplied for each AXI transaction.
+- xlnx,sdfec-dout-width : Configures the DOUT AXI stream where a value of 1
+ configures a width of "1x128b", 2 a width of "2x128b" and 4 configures a width
+ of "4x128b".
+
+Optional properties:
+- interrupts: Should contain the SDFEC interrupt number
+
+Example
+---------------------------------------
+ sd_fec_0: sd-fec@a0040000 {
+ compatible = "xlnx,sd-fec-1.1";
+ clock-names = "core_clk", "s_axi_aclk", "s_axis_ctrl_aclk", "s_axis_din_aclk", "m_axis_status_aclk", "m_axis_dout_aclk";
+ clocks = <&misc_clk_2>, <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_1>, <&misc_clk_1>, <&misc_clk_1>;
+ reg = <0x0 0xa0040000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ xlnx,sdfec-code = "ldpc";
+ xlnx,sdfec-din-words = <0>;
+ xlnx,sdfec-din-width = <2>;
+ xlnx,sdfec-dout-words = <0>;
+ xlnx,sdfec-dout-width = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 1edbb049cccb..6b7cd42b8e95 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -14,6 +14,8 @@ Required Properties:
- "arasan,sdhci-4.9a": generic Arasan SDHCI 4.9a PHY
- "arasan,sdhci-5.1": generic Arasan SDHCI 5.1 PHY
- "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1": rk3399 eMMC PHY
+ - "xlnx,zynqmp-8.9a": Xilinx ZynqMP Arasan SDHCI 8.9a PHY
+ - "xlnx,versal-8.9a": Xilinx Versal Arasan SDHCI 8.9a PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
- "ti,am654-sdhci-5.1", "arasan,sdhci-5.1": TI AM654 MMC PHY
Note: This binding has been deprecated and moved to [5].
@@ -29,7 +31,13 @@ Required Properties for "arasan,sdhci-5.1":
- phys: From PHY bindings: Phandle for the Generic PHY for arasan.
- phy-names: MUST be "phy_arasan".
+Required Properties for "xlnx,zynqmp-8.9a" and "xlnx,versal-8.9a":
+ - xlnx,mio_bank: The value will be 0/1/2 depending on MIO bank selection.
+ - xlnx,device_id: Unique Id of the device, value will be 0/1.
+
Optional Properties:
+ - broken-mmc-highspeed: Indicates that the controller should be forced
+   to run in standard speed mode.
- arasan,soc-ctl-syscon: A phandle to a syscon device (see ../mfd/syscon.txt)
used to access core corecfg registers. Offsets of registers in this
syscon are determined based on the main compatible string for the device.
@@ -44,6 +52,28 @@ Optional Properties:
properly. Test mode can be used to force the controller to function.
- xlnx,int-clock-stable-broken: when present, the controller always reports
that the internal clock is stable even when it is not.
+ - pinctrl-0: pin control group to be used for this controller.
+ - pinctrl-names: must contain a "default" entry.
+
+Optional Properties for "xlnx,zynqmp-8.9a":
+ - nvmem-cells: List of phandles to the nvmem data cells.
+ - nvmem-cell-names: Names for each of the nvmem cells specified.
+ - xlnx,itap-delay-sd-hsd: Input Tap Delay for SD HS.
+ - xlnx,itap-delay-sdr25: Input Tap Delay for SDR25.
+ - xlnx,itap-delay-sdr50: Input Tap Delay for SDR50.
+ - xlnx,itap-delay-sdr104: Input Tap Delay for SDR104.
+ - xlnx,itap-delay-sd-ddr50: Input Tap Delay for SD DDR50.
+ - xlnx,itap-delay-mmc-hsd: Input Tap Delay for MMC HS.
+ - xlnx,itap-delay-mmc-ddr52: Input Tap Delay for MMC DDR52.
+ - xlnx,itap-delay-mmc-hs200: Input Tap Delay for MMC HS200.
+ - xlnx,otap-delay-sd-hsd: Output Tap Delay for SD HS.
+ - xlnx,otap-delay-sdr25: Output Tap Delay for SDR25.
+ - xlnx,otap-delay-sdr50: Output Tap Delay for SDR50.
+ - xlnx,otap-delay-sdr104: Output Tap Delay for SDR104.
+ - xlnx,otap-delay-sd-ddr50: Output Tap Delay for DDR50.
+ - xlnx,otap-delay-mmc-hsd: Output Tap Delay for MMC HS.
+ - xlnx,otap-delay-mmc-ddr52: Output Tap Delay for MMC DDR52.
+ - xlnx,otap-delay-mmc-hs200: Output Tap Delay for MMC HS200.
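+
+Example for "xlnx,zynqmp-8.9a" (a sketch only; the address, interrupt,
+clock phandles and tap-delay values below are illustrative, not taken from
+a real design):
+
+	sdhci0: mmc@ff160000 {
+		compatible = "xlnx,zynqmp-8.9a";
+		reg = <0x0 0xff160000 0x0 0x1000>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 48 4>;
+		clock-names = "clk_xin", "clk_ahb";
+		clocks = <&sdio0_ref_clk>, <&lpd_lsbus_clk>;
+		xlnx,mio_bank = <0>;
+		xlnx,device_id = <0>;
+		/* illustrative tap-delay values */
+		xlnx,itap-delay-sd-hsd = <0x15>;
+		xlnx,otap-delay-sd-hsd = <0x5>;
+	};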
Example:
sdhci@e0100000 {
diff --git a/Documentation/devicetree/bindings/mtd/arasan_nand.txt b/Documentation/devicetree/bindings/mtd/arasan_nand.txt
new file mode 100644
index 000000000000..546ed98d9777
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/arasan_nand.txt
@@ -0,0 +1,33 @@
+Arasan NAND Flash Controller with ONFI 3.1 support
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-nand", "arasan,nfc-v3p10"
+- reg: Memory map for module access
+- interrupt-parent: Interrupt controller the interrupt is routed through
+- interrupts: Should contain the interrupt for the device
+- clock-names: List of input clocks - "sys", "flash"
+ (See clock bindings for details)
+- clocks: Clock phandles (see clock bindings for details)
+
+Required properties for child node:
+- nand-ecc-mode: see nand.txt
+
+For NAND partition information please refer to the file below:
+Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+ nfc: nand@ff100000 {
+		compatible = "xlnx,zynqmp-nand", "arasan,nfc-v3p10";
+		reg = <0x0 0xff100000 0x1000>;
+		clock-names = "sys", "flash";
+		clocks = <&misc_clk &misc_clk>;
+		interrupt-parent = <&gic>;
+		interrupts = <0 14 4>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		nand@0 {
+			reg = <0>;
+ nand-ecc-mode = "hw";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 9c5e94482b5f..199e6f616dc4 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -35,12 +35,19 @@ Optional properties for PHY child node:
up via magic packet.
- phy-handle : see ethernet.txt file in the same directory
+Optional properties:
+- tsu-clk: Time stamp unit clock frequency used.
+- rx-watermark: Set watermark value for pbuf_rxcutthru reg and enable
+ rx partial store and forward, only when compatible = "cdns,zynqmp-gem".
+ Value should be less than 0xFFF.
+
Examples:
macb0: ethernet@fffc4000 {
compatible = "cdns,at32ap7000-macb";
reg = <0xfffc4000 0x4000>;
interrupts = <21>;
+ rx-watermark = /bits/ 16 <0x44>;
phy-mode = "rmii";
local-mac-address = [3a 0e 03 04 05 06];
clock-names = "pclk", "hclk", "tx_clk";
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 9ef9338aaee1..c6bfaea1336d 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -27,6 +27,10 @@ Optional property:
for details.
- ti,clk-output-sel - Muxing option for CLK_OUT pin - see dt-bindings/net/ti-dp83867.h
for applicable values.
+   - ti,6-wire-mode - This denotes that the board uses the SGMII
+                      6-wire mode configuration. If this property
+                      is not present, the default is 4-wire mode.
+                      See the data manual for details.
Note: ti,min-output-impedance and ti,max-output-impedance are mutually
exclusive. When both properties are present ti,max-output-impedance
diff --git a/Documentation/devicetree/bindings/net/xilinx-phy.txt b/Documentation/devicetree/bindings/net/xilinx-phy.txt
new file mode 100644
index 000000000000..aeb9917497b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx-phy.txt
@@ -0,0 +1,15 @@
+Xilinx PCS/PMA PHY bindings
+
+Required properties:
+ - reg - The ID number for the phy, usually a small integer
+
+Optional properties:
+ - xlnx,phy-type - Describes type 1000BaseX (set to 0x5) or
+ SGMII (set to 0x4)
+
+Example:
+
+ ethernet-phy@9 {
+ reg = <9>;
+ xlnx,phy-type = <0x5>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt b/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt
new file mode 100644
index 000000000000..e66b64bc10e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt
@@ -0,0 +1,54 @@
+Xilinx TSN (time sensitive networking) TEMAC axi ethernet driver (xilinx_axienet)
+-----------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-ethernet-1.00.a".
+- reg : Physical base address and size of the TSN registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-names	: Property denotes the interrupt names.
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt file [1].
+- local-mac-address : See ethernet.txt file [1].
+- phy-mode : see ethernet.txt file [1].
+
+Optional properties:
+- xlnx,tsn		: Denotes an Ethernet MAC with TSN capabilities.
+- xlnx,tsn-slave : Denotes a TSN slave port.
+- xlnx,txcsum : Tx checksum mode (Full, Partial and None).
+- xlnx,rxcsum : Rx checksum mode (Full, Partial and None).
+- xlnx,phy-type : Xilinx phy device type. See xilinx-phy.txt [2].
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
+- xlnx,num-queue	: Number of queues supported in the current design,
+			  range is 2 to 5 and default value is 5.
+- xlnx,num-tc		: Number of traffic classes supported in the current
+			  design, range is 2 or 3 and default value is 3. It
+			  denotes the traffic classes based on VLAN-PCP value.
+- xlnx,qbv-addr	: Denotes the MAC scheduler physical base address.
+- xlnx,qbv-size	: Denotes the MAC scheduler address space size.
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+[2] Documentation/devicetree/bindings/net/xilinx-phy.txt
+
+Example:
+
+ tsn_emac_0: tsn_mac@80040000 {
+ compatible = "xlnx,tsn-ethernet-1.00.a";
+ interrupt-parent = <&gic>;
+ interrupts = <0 104 4 0 106 4 0 91 4 0 110 4>;
+ interrupt-names = "interrupt_ptp_rx_1", "interrupt_ptp_tx_1", "mac_irq_1", "interrupt_ptp_timer";
+ local-mac-address = [ 00 0A 35 00 01 0e ];
+ phy-mode = "rgmii";
+ reg = <0x0 0x80040000 0x0 0x14000>;
+ tsn,endpoint = <&tsn_ep>;
+ xlnx,tsn;
+ xlnx,tsn-slave;
+ xlnx,phy-type = <0x3>;
+ xlnx,eth-hasnobuf;
+ xlnx,num-queue = <0x2>;
+ xlnx,num-tc = <0x3>;
+ xlnx,qbv-addr = <0x80054000>;
+ xlnx,qbv-size = <0x2000>;
+		xlnx,txcsum = <0>;
+		xlnx,rxcsum = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
index 38f9ec076743..4cb9a50b7d2a 100644
--- a/Documentation/devicetree/bindings/net/xilinx_axienet.txt
+++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
@@ -1,55 +1,104 @@
-XILINX AXI ETHERNET Device Tree Bindings
+XILINX AXI ETHERNET driver (xilinx_axienet)
--------------------------------------------------------
-Also called AXI 1G/2.5G Ethernet Subsystem, the xilinx axi ethernet IP core
-provides connectivity to an external ethernet PHY supporting different
-interfaces: MII, GMII, RGMII, SGMII, 1000BaseX. It also includes two
-segments of memory for buffering TX and RX, as well as the capability of
-offloading TX/RX checksum calculation off the processor.
+This driver supports the following MAC configurations:
+a) AXI 1G/2.5G Ethernet Subsystem.
+b) 10G/25G High Speed Ethernet Subsystem.
+c) 10 Gigabit Ethernet Subsystem.
+d) USXGMII Ethernet Subsystem.
-Management configuration is done through the AXI interface, while payload is
-sent and received through means of an AXI DMA controller. This driver
-includes the DMA driver code, so this driver is incompatible with AXI DMA
-driver.
+Management configuration is done through the AXI4-Lite interface.
+The transmit and receive data interface is via the AXI4-Stream
+interface connected to the DMA controller. This driver supports the
+Xilinx AXI DMA and MCDMA IPs. The programming sequence for these DMA
+IPs is included in the xilinx_axienet driver.
-For more details about mdio please refer phy.txt file in the same directory.
+For details about MDIO please refer to phy.txt [1].
Required properties:
-- compatible : Must be one of "xlnx,axi-ethernet-1.00.a",
- "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a"
+- compatible : Must be one of "xlnx,axi-ethernet-1.00.a" or
+ "xlnx,axi-ethernet-1.01.a" or "xlnx,axi-ethernet-2.01.a"
+ for 1G MAC,
+ "xlnx,ten-gig-eth-mac" for 10 Gigabit Ethernet Subsystem,
+ "xlnx,xxv-ethernet-1.0" for 10G/25G MAC,
+ "xlnx,axi-2_5-gig-ethernet-1.0" for 2.5G MAC and
+ "xlnx,xxv-usxgmii-ethernet-1.0" for USXGMII.
- reg : Address and length of the IO space.
 - interrupts	: Should be a list of two interrupts, TX and RX.
-- phy-handle : Should point to the external phy device.
- See ethernet.txt file in the same directory.
-- xlnx,rxmem : Set to allocated memory buffer for Rx/Tx in the hardware
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt [2].
+- local-mac-address : See ethernet.txt [2].
+- phy-mode : See ethernet.txt [2].
+- axistream-connected : Should contain phandle of DMA node.
+Required properties (When AxiEthernet is configured with MCDMA):
+- xlnx,channel-ids : Queue Identifier associated with the MCDMA Channel.
+- interrupt-names : Should contain the interrupt names.
Optional properties:
-- phy-mode : See ethernet.txt
-- xlnx,phy-type : Deprecated, do not use, but still accepted in preference
- to phy-mode.
-- xlnx,txcsum : 0 or empty for disabling TX checksum offload,
- 1 to enable partial TX checksum offload,
- 2 to enable full TX checksum offload
-- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload
-
-Example:
- axi_ethernet_eth: ethernet@40c00000 {
- compatible = "xlnx,axi-ethernet-1.00.a";
- device_type = "network";
- interrupt-parent = <&microblaze_0_axi_intc>;
- interrupts = <2 0>;
- phy-mode = "mii";
- reg = <0x40c00000 0x40000>;
- xlnx,rxcsum = <0x2>;
- xlnx,rxmem = <0x800>;
- xlnx,txcsum = <0x2>;
- phy-handle = <&phy0>;
- axi_ethernetlite_0_mdio: mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: phy@0 {
- device_type = "ethernet-phy";
- reg = <1>;
+- xlnx,rxmem : Max Rx Memory size.
+- xlnx,txcsum : Tx checksum mode (Full, Partial and None).
+- xlnx,rxcsum : Rx checksum mode (Full, Partial and None).
+- xlnx,phy-type : Xilinx phy device type. See xilinx-phy.txt [3].
+- dma-coherent : Present if dma operations are coherent.
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
+- xlnx,rxtsfifo : Configures the axi fifo for receive timestamping.
+- xlnx,eth-hasptp : Tells whether PTP is enabled in h/w or not.
+- axififo-connected : Should contain the phandle of AXI stream fifo.
+- clocks : Input clock specifier. Refer to common clock bindings.
+- clock-names : Input clock names. Refer to IP PG for signal description.
+ 1G/2.5G: s_axi_lite_clk, axis_clk and ref_clk.
+ 10G/25G and USXGMII: s_axi_aclk, rx_core_clk and dclk.
+ 10 Gigabit: s_axi_aclk and dclk.
+ AXI DMA and MCDMA: m_axi_sg_aclk, m_axi_mm2s_aclk and
+ m_axi_s2mm_aclk.
+
+Optional properties (When AxiEthernet is configured with MCDMA):
+- xlnx,num-queues : Number of queues/channels configured in h/w.
+Optional properties (When USXGMII is in use):
+- xlnx,usxgmii-rate : USXGMII PHY speed - can be 10, 100, 1000, 2500,
+ 5000 or 10000.
+Optional properties for connected DMA node:
+- xlnx,addrwidth : Specify the width of the DMA address space in bits.
+ Valid range is 32-64. Default is 32.
+- xlnx,include-dre	: Tells whether the DMA h/w is configured with a data
+			  realignment engine (DRE) or not.
+
+NOTE: Time Sensitive Networking (TSN) related DT bindings are explained in [4].
+
+[1] Documentation/devicetree/bindings/net/phy.txt
+[2] Documentation/devicetree/bindings/net/ethernet.txt
+[3] Documentation/devicetree/bindings/net/xilinx-phy.txt
+[4] Documentation/devicetree/bindings/net/xilinx_tsn.txt
+
+Example: AXI 1G/2.5G Ethernet Subsystem + AXIDMA
+
+ axi_eth_0_dma: dma@80040000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,eth-dma";
+ <snip>
+ };
+
+ axi_eth_0: ethernet@80000000 {
+ axistream-connected = <&axi_eth_0_dma>;
+ compatible = "xlnx,axi-ethernet-1.00.a";
+ device_type = "network";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 91 4>;
+ phy-handle = <&phy2>;
+ phy-mode = "sgmii";
+ reg = <0x0 0x80000000 0x0 0x40000>;
+ xlnx,include-dre ;
+ xlnx,phy-type = <0x5>;
+ xlnx,rxcsum = <0x0>;
+ xlnx,rxmem = <0x1000>;
+ xlnx,txcsum = <0x0>;
+ axi_eth_0_mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy2: phy@2 {
+ device_type = "ethernet-phy";
+ reg = <2>;
+ };
};
- };
};
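+
+Example: AXI 1G/2.5G Ethernet Subsystem + MCDMA (a sketch only; the
+addresses, interrupt numbers, interrupt names and channel IDs below are
+illustrative, not authoritative):
+
+	mcdma_0: dma@80050000 {
+		#dma-cells = <1>;
+		compatible = "xlnx,eth-dma";
+		xlnx,addrwidth = <0x20>;
+		xlnx,include-dre ;
+		<snip>
+	};
+
+	axi_eth_1: ethernet@80060000 {
+		axistream-connected = <&mcdma_0>;
+		compatible = "xlnx,axi-ethernet-1.00.a";
+		device_type = "network";
+		interrupt-parent = <&gic>;
+		interrupts = <0 92 4>;
+		/* illustrative interrupt name and channel ID */
+		interrupt-names = "interrupt";
+		xlnx,channel-ids = "1";
+		xlnx,num-queues = <0x1>;
+		phy-handle = <&phy3>;
+		phy-mode = "sgmii";
+		reg = <0x0 0x80060000 0x0 0x40000>;
+		xlnx,rxmem = <0x1000>;
+	};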
diff --git a/Documentation/devicetree/bindings/net/xilinx_emaclite.txt b/Documentation/devicetree/bindings/net/xilinx_emaclite.txt
new file mode 100644
index 000000000000..55965d942f97
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_emaclite.txt
@@ -0,0 +1,34 @@
+Xilinx Axi Ethernetlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,opb-ethernetlite-1.01.a" or
+ "xlnx,opb-ethernetlite-1.01.b" or
+ "xlnx,opb-ethernetlite-1.00.a" or
+ "xlnx,xps-ethernetlite-2.00.a" or
+ "xlnx,xps-ethernetlite-2.01.a" or
+			  "xlnx,xps-ethernetlite-3.00.a".
+- reg : Physical base address and size of the Axi ethernetlite
+ registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt file in the same directory.
+- local-mac-address : See ethernet.txt file in the same directory.
+
+Optional properties:
+- xlnx,tx-ping-pong : If present, hardware supports tx ping pong buffer.
+- xlnx,rx-ping-pong : If present, hardware supports rx ping pong buffer.
+
+Example:
+ axi_ethernetlite_1: ethernet@40e00000 {
+ compatible = "xlnx,axi-ethernetlite-3.0", "xlnx,xps-ethernetlite-1.00.a";
+ device_type = "network";
+ interrupt-parent = <&axi_intc_1>;
+ interrupts = <1 0>;
+ local-mac-address = [00 0a 35 00 00 00];
+ phy-handle = <&phy0>;
+ reg = <0x40e00000 0x10000>;
+ xlnx,rx-ping-pong;
+ xlnx,tx-ping-pong;
+	};
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn.txt b/Documentation/devicetree/bindings/net/xilinx_tsn.txt
new file mode 100644
index 000000000000..8ef9fa9f3968
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn.txt
@@ -0,0 +1,14 @@
+Xilinx TSN (time sensitive networking) IP driver (xilinx_tsn_ip)
+-----------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be one of "xlnx,tsn-endpoint-ethernet-mac-1.0",
+ "xlnx,tsn-endpoint-ethernet-mac-2.0" for TSN.
+- reg : Physical base address and size of the TSN registers map.
+
+Example:
+
+ tsn_endpoint_ip_0: tsn_endpoint_ip_0 {
+ compatible = "xlnx,tsn-endpoint-ethernet-mac-2.0";
+ reg = <0x0 0x80040000 0x0 0x40000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt b/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt
new file mode 100644
index 000000000000..f42e5417d164
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt
@@ -0,0 +1,35 @@
+Xilinx TSN (time sensitive networking) EndPoint Driver (xilinx_tsn_ep)
+-------------------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-ep"
+- reg : Physical base address and size of the TSN Endpoint
+ registers map
+- interrupts : Property with a value describing the interrupt
+- interrupt-names	: Property denotes the interrupt names.
+- interrupt-parent : Must be core interrupt controller.
+- local-mac-address : See ethernet.txt [1].
+
+Optional properties:
+- xlnx,num-tc		: Number of traffic classes supported in the current
+			  design, range is 2 or 3 and default value is 3. It
+			  denotes the traffic classes based on VLAN-PCP value.
+- xlnx,channel-ids : Queue Identifier associated with the MCDMA Channel, range
+ is Tx: "1 to 2" and Rx: "2 to 5", default value is "1 to 5".
+- xlnx,eth-hasnobuf	: Used when the 1G MAC is configured in non-processor mode.
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+
+Example:
+
+ tsn_ep: tsn_ep@80056000 {
+ compatible = "xlnx,tsn-ep";
+ reg = <0x0 0x80056000 0x0 0xA000>;
+ xlnx,num-tc = <0x3>;
+ interrupt-names = "tsn_ep_scheduler_irq";
+ interrupt-parent = <&gic>;
+ interrupts = <0 111 4>;
+ local-mac-address = [00 0A 35 00 01 10];
+ xlnx,channel-ids = "1","2","3","4","5";
+ xlnx,eth-hasnobuf ;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt b/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt
new file mode 100644
index 000000000000..898e5b7b57e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt
@@ -0,0 +1,23 @@
+Xilinx TSN (time sensitive networking) Switch Driver (xilinx_tsn_switch)
+-----------------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-switch"
+- reg : Physical base address and size of the TSN registers map.
+
+Optional properties:
+- xlnx,num-tc		: Number of traffic classes supported in the current
+			  design, range is 2 or 3 and default value is 3. It
+			  denotes the traffic classes based on VLAN-PCP value.
+- xlnx,has-hwaddr-learning : Denotes hardware address learning support
+- xlnx,has-inband-mgmt-tag : Denotes inband management support
+
+Example:
+
+ epswitch: tsn_switch@80078000 {
+ compatible = "xlnx,tsn-switch";
+ reg = <0x0 0x80078000 0x0 0x4000>;
+ xlnx,num-tc = <0x3>;
+ xlnx,has-hwaddr-learning ;
+ xlnx,has-inband-mgmt-tag ;
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
index 4881561b3a02..be126ccf4802 100644
--- a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
+++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
@@ -25,9 +25,78 @@ firmware {
#size-cells = <1>;
/* Data cells */
- soc_revision: soc_revision {
+ soc_revision: soc_revision@0 {
reg = <0x0 0x4>;
};
+ /*
+ * efuse memory access:
+		 * all the efuse fields need to be read
+ * with the exact size specified in the node
+ */
+ /* DNA */
+ efuse_dna: efuse_dna@c {
+ reg = <0xc 0xc>;
+ };
+ /* User 0 */
+ efuse_usr0: efuse_usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ /* User 1 */
+ efuse_usr1: efuse_usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ /* User 2 */
+ efuse_usr2: efuse_usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ /* User 3 */
+ efuse_usr3: efuse_usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ /* User 4 */
+ efuse_usr4: efuse_usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ /* User 5 */
+ efuse_usr5: efuse_usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ /* User 6 */
+ efuse_usr6: efuse_usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ /* User 7 */
+ efuse_usr7: efuse_usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ /* Misc user control bits */
+ efuse_miscusr: efuse_miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ /* PUF chash */
+ efuse_chash: efuse_chash@50 {
+ reg = <0x50 0x4>;
+ };
+ /* PUF misc */
+ efuse_pufmisc: efuse_pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ /* SEC_CTRL */
+ efuse_sec: efuse_sec@58 {
+ reg = <0x58 0x4>;
+ };
+ /* SPK ID */
+ efuse_spkid: efuse_spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ /* PPK0 hash */
+ efuse_ppk0hash: efuse_ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ /* PPK1 hash */
+ efuse_ppk1hash: efuse_ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
};
};
};
@@ -44,3 +113,22 @@ For example:
...
};
+
+To program efuse memory, one should request the exact size in bytes as
+specified below.
+NOTE: Efuse bits, once programmed, cannot be reverted.
+
+ - | TYPE | OFFSET | SIZE(bytes) |
+ - |User-0 | 0x20 | 0x4 |
+ - |User-1 | 0x24 | 0x4 |
+ - |User-2 | 0x28 | 0x4 |
+ - |User-3 | 0x2C | 0x4 |
+ - |User-4 | 0x30 | 0x4 |
+ - |User-5 | 0x34 | 0x4 |
+ - |User-6 | 0x38 | 0x4 |
+ - |User-7 | 0x3c | 0x4 |
+ - |Misc User | 0x40 | 0x4 |
+ - |SEC_CTRL | 0x58 | 0x4 |
+ - |SPK ID | 0x5C | 0x4 |
+ - |AES KEY | 0x60 | 0x20 |
+ - |PPK0 hash | 0xA0 | 0x30 |
+ - |PPK1 hash | 0xD0 | 0x30 |
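+
+A consumer node could then reference one of these cells, for example as
+below (a sketch; the consumer node name and the cell name string are
+illustrative):
+
+	some_device {
+		...
+		/* illustrative consumer of the User 0 efuse cell */
+		nvmem-cells = <&efuse_usr0>;
+		nvmem-cell-names = "user0-fuse";
+	};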
diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
index 01bf7fdf4c19..c12bcf0f8947 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
@@ -18,6 +18,7 @@ Required properties:
"msi1, msi0": interrupt asserted when an MSI is received
"intx": interrupt asserted when a legacy interrupt is received
"misc": interrupt asserted when miscellaneous interrupt is received
+- clocks: Should contain a clock specifier for the device
- interrupt-map-mask and interrupt-map: standard PCI properties to define the
mapping of the PCI interface to interrupt numbers.
- ranges: ranges for the PCI memory regions (I/O space region is not
@@ -52,6 +53,7 @@ nwl_pcie: pcie@fd0e0000 {
<0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+	clocks = <&clkc 23>;
msi-parent = <&nwl_pcie>;
reg = <0x0 0xfd0e0000 0x0 0x1000>,
diff --git a/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt
new file mode 100644
index 000000000000..92b7194a2f7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt
@@ -0,0 +1,87 @@
+* Xilinx XDMA PL PCIe Root Port Bridge DT description
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "xlnx,xdma-host-3.00"
+- reg: Should contain XDMA PCIe registers location and length
+- device_type: must be "pci"
+- interrupts: Should contain AXI PCIe interrupt
+- interrupt-map-mask,
+ interrupt-map: standard PCI properties to define the mapping of the
+ PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+ supported by hardware)
+ Please refer to the standard PCI bus binding document for a more
+ detailed explanation
+
+For MSI DECODE mode:
+- interrupt-names: Must include the following entries:
+ "misc": interrupt asserted when legacy or error interrupt is received
+ "msi1, msi0": interrupt asserted when an MSI is received
+
+Interrupt controller child node
++++++++++++++++++++++++++++++++
+Required properties:
+- interrupt-controller: identifies the node as an interrupt controller
+- #address-cells: specifies the number of cells needed to encode an
+ address. The value must be 0.
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+
+NOTE:
+The core provides a single interrupt for both INTx/MSI messages. So,
+an interrupt controller node is created to support the 'interrupt-map'
+DT functionality. The driver will create an IRQ domain for this map,
+decode the four INTx interrupts in the ISR and route them to this domain.
+
+
+Example:
+++++++++
+MSI FIFO mode:
+ xdma_0: axi-pcie@a0000000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,xdma-host-3.00";
+ device_type = "pci";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>,
+ <0 0 0 2 &pcie_intc_0 2>,
+ <0 0 0 3 &pcie_intc_0 3>,
+ <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ ranges = <0x02000000 0x00000000 0xB0000000 0x0 0xB0000000 0x00000000 0x01000000>,
+ <0x43000000 0x00000005 0x00000000 0x00000005 0x00000000 0x00000000 0x01000000>;
+ reg = <0x0 0xA0000000 0x0 0x10000000>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
+
+MSI DECODE mode:
+ xdma_0: axi-pcie@a0000000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,xdma-host-3.00";
+ device_type = "pci";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>, <0 0 0 2 &pcie_intc_0 2>, <0 0 0 3 &pcie_intc_0 3>, <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "misc", "msi0", "msi1";
+ interrupts = <0 89 4>, <0 90 4>, <0 91 4>;
+ ranges = <0x02000000 0x00000000 0xB0000000 0x0 0xB0000000 0x00000000 0x01000000>,
+ <0x43000000 0x00000005 0x00000000 0x00000005 0x00000000 0x00000000 0x01000000>;
+ reg = <0x0 0xA0000000 0x0 0x10000000>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-zynqmp.txt b/Documentation/devicetree/bindings/phy/phy-zynqmp.txt
new file mode 100644
index 000000000000..ed080df891a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-zynqmp.txt
@@ -0,0 +1,119 @@
+Xilinx ZynqMP PHY binding
+
+This binding describes a ZynqMP PHY device that is used to control ZynqMP
+High Speed Gigabit Transceiver (GT). The ZynqMP PS GTR provides four lanes
+which are used by the USB, SATA, PCIe, DisplayPort and Ethernet SGMII
+controllers.
+
+Required properties (controller (parent) node):
+- compatible : Can be "xlnx,zynqmp-psgtr-v1.1" or "xlnx,zynqmp-psgtr"
+ "xlnx,zynqmp-psgtr-v1.1" has the lpd address mapping removed
+
+- reg : Address and length of register sets for each device in
+ "reg-names"
+- reg-names : The names of the register addresses corresponding to the
+ registers filled in "reg":
+ - serdes: SERDES block register set
+ - siou: SIOU block register set
+ - lpd: Low power domain peripherals reset control
+
+Required nodes : A sub-node is required for each lane the controller
+ provides.
+
+Required properties (port (child) nodes):
+lane0:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 0 LANE_NUM FREQUENCY>
+lane1:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 1 LANE_NUM FREQUENCY>
+lane2:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 2 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+	  - <PHY_TYPE_DP 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 2 LANE_NUM FREQUENCY>
+lane3:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 3 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 1 LANE_NUM FREQUENCY>
+	  - <PHY_TYPE_USB3 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 3 LANE_NUM FREQUENCY>
+
+Note: LANE_NUM : This determines which lane's reference clock is shared
+                 by the controller.
+      FREQUENCY: This is the clock frequency at which the controller
+                 wants to operate.
+
+
+Example:
+ serdes: zynqmp_phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr";
+ status = "okay";
+ reg = <0x0 0xfd400000 0x0 0x40000>, <0x0 0xfd3d0000 0x0 0x1000>,
+ <0x0 0xff5e0000 0x0 0x1000>;
+ reg-names = "serdes", "siou", "lpd";
+
+ lane0: lane@0 {
+ #phy-cells = <4>;
+ };
+ lane1: lane@1 {
+ #phy-cells = <4>;
+ };
+ lane2: lane@2 {
+ #phy-cells = <4>;
+ };
+ lane3: lane@3 {
+ #phy-cells = <4>;
+ };
+ };
+
+Specifying phy control of devices
+=================================
+
+Device nodes should specify the configuration required in their "phys"
+property, containing a phandle to the phy port node and a device type.
+
+phys = <PHANDLE CONTROLLER_TYPE CONTROLLER_INSTANCE LANE_NUM LANE_FREQ>;
+
+PHANDLE = &lane0 or &lane1 or &lane2 or &lane3
+CONTROLLER_TYPE = PHY_TYPE_PCIE or PHY_TYPE_SATA or PHY_TYPE_USB
+ or PHY_TYPE_DP or PHY_TYPE_SGMII
+CONTROLLER_INSTANCE = Depends on controller type used, can be any of
+ PHY_TYPE_PCIE : 0 or 1 or 2 or 3
+ PHY_TYPE_SATA : 0 or 1
+ PHY_TYPE_USB : 0 or 1
+ PHY_TYPE_DP : 0 or 1
+ PHY_TYPE_SGMII: 0 or 1 or 2 or 3
+LANE_NUM = Depends on which lane clock is used as ref clk, can be
+ 0 or 1 or 2 or 3
+LANE_FREQ = Frequency at which the controller can operate, can be any of
+            19.2MHz, 20MHz, 24MHz, 26MHz, 27MHz, 28.4MHz, 40MHz, 52MHz,
+            100MHz, 108MHz, 125MHz, 135MHz, 150MHz
+
+Example:
+
+#include <dt-bindings/phy/phy.h>
+
+ usb@fe200000 {
+ ...
+		phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ ...
+ };
+
+ ahci@fd0c0000 {
+ ...
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt
new file mode 100644
index 000000000000..3007f6f4705d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt
@@ -0,0 +1,275 @@
+ Binding for Xilinx ZynqMP Pinctrl
+
+Required properties:
+- compatible: "xlnx,zynqmp-pinctrl"
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+ZynqMP's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, slew rate, etc.
+
+Each configuration node can consist of multiple nodes describing the pinmux and
+pinconf options. Those nodes can be pinmux nodes or pinconf nodes.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Required properties for pinmux nodes are:
+ - groups: A list of pinmux groups.
+ - function: The name of a pinmux function to activate for the specified set
+ of groups.
+
+Required properties for configuration nodes:
+One of:
+ - pins: A list of pin names
+ - groups: A list of pinmux groups.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinmux subnode:
+ groups, function
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinconf subnode:
+ groups, pins, bias-disable, bias-pull-up, bias-pull-down, slew-rate
+
+ Valid arguments for 'slew-rate' are 'SLEW_RATE_SLOW' and 'SLEW_RATE_FAST' to
+ select between slow and fast respectively.
+
+ Valid values for groups are:
+ ethernet0_0_grp, ethernet1_0_grp, ethernet2_0_grp,
+ ethernet3_0_grp, gemtsu0_0_grp, gemtsu0_1_grp,
+ gemtsu0_2_grp, mdio0_0_grp, mdio1_0_grp,
+ mdio1_1_grp, mdio2_0_grp, mdio3_0_grp,
+ qspi0_0_grp, qspi_ss_0_grp, qspi_fbclk_0_grp,
+ spi0_0_grp, spi0_ss_0_grp, spi0_ss_1_grp,
+ spi0_ss_2_grp, spi0_1_grp, spi0_ss_3_grp,
+ spi0_ss_4_grp, spi0_ss_5_grp, spi0_2_grp,
+ spi0_ss_6_grp, spi0_ss_7_grp, spi0_ss_8_grp,
+ spi0_3_grp, spi0_ss_9_grp, spi0_ss_10_grp,
+ spi0_ss_11_grp, spi0_4_grp, spi0_ss_12_grp,
+ spi0_ss_13_grp, spi0_ss_14_grp, spi0_5_grp,
+ spi0_ss_15_grp, spi0_ss_16_grp, spi0_ss_17_grp,
+ spi1_0_grp, spi1_ss_0_grp, spi1_ss_1_grp,
+ spi1_ss_2_grp, spi1_1_grp, spi1_ss_3_grp,
+ spi1_ss_4_grp, spi1_ss_5_grp, spi1_2_grp,
+ spi1_ss_6_grp, spi1_ss_7_grp, spi1_ss_8_grp,
+ spi1_3_grp, spi1_ss_9_grp, spi1_ss_10_grp,
+ spi1_ss_11_grp, spi1_4_grp, spi1_ss_12_grp,
+ spi1_ss_13_grp, spi1_ss_14_grp, spi1_5_grp,
+ spi1_ss_15_grp, spi1_ss_16_grp, spi1_ss_17_grp,
+ sdio0_0_grp, sdio0_1_grp, sdio0_2_grp,
+ sdio0_3_grp, sdio0_4_grp, sdio0_5_grp,
+ sdio0_6_grp, sdio0_7_grp, sdio0_8_grp,
+ sdio0_9_grp, sdio0_10_grp, sdio0_11_grp,
+ sdio0_12_grp, sdio0_13_grp, sdio0_14_grp,
+ sdio0_15_grp, sdio0_16_grp, sdio0_17_grp,
+ sdio0_18_grp, sdio0_19_grp, sdio0_20_grp,
+ sdio0_21_grp, sdio0_22_grp, sdio0_23_grp,
+ sdio0_24_grp, sdio0_25_grp, sdio0_26_grp,
+ sdio0_27_grp, sdio0_28_grp, sdio0_29_grp,
+ sdio0_30_grp, sdio0_31_grp, sdio0_32_grp,
+ sdio0_pc_0_grp, sdio0_cd_0_grp, sdio0_wp_0_grp,
+ sdio0_pc_1_grp, sdio0_cd_1_grp, sdio0_wp_1_grp,
+ sdio0_pc_2_grp, sdio0_cd_2_grp, sdio0_wp_2_grp,
+ sdio1_0_grp, sdio1_1_grp, sdio1_2_grp,
+ sdio1_3_grp, sdio1_4_grp, sdio1_5_grp,
+ sdio1_6_grp, sdio1_7_grp, sdio1_8_grp,
+ sdio1_9_grp, sdio1_10_grp, sdio1_11_grp,
+ sdio1_12_grp, sdio1_13_grp, sdio1_14_grp,
+ sdio1_15_grp, sdio1_pc_0_grp, sdio1_cd_0_grp,
+ sdio1_wp_0_grp, sdio1_pc_1_grp, sdio1_cd_1_grp,
+ sdio1_wp_1_grp, nand0_0_grp, nand0_ce_0_grp,
+ nand0_rb_0_grp, nand0_dqs_0_grp, nand0_ce_1_grp,
+ nand0_rb_1_grp, nand0_dqs_1_grp, can0_0_grp,
+ can0_1_grp, can0_2_grp, can0_3_grp,
+ can0_4_grp, can0_5_grp, can0_6_grp,
+ can0_7_grp, can0_8_grp, can0_9_grp,
+ can0_10_grp, can0_11_grp, can0_12_grp,
+ can0_13_grp, can0_14_grp, can0_15_grp,
+ can0_16_grp, can0_17_grp, can0_18_grp,
+ can1_0_grp, can1_1_grp, can1_2_grp,
+ can1_3_grp, can1_4_grp, can1_5_grp,
+ can1_6_grp, can1_7_grp, can1_8_grp,
+ can1_9_grp, can1_10_grp, can1_11_grp,
+ can1_12_grp, can1_13_grp, can1_14_grp,
+ can1_15_grp, can1_16_grp, can1_17_grp,
+ can1_18_grp, can1_19_grp, uart0_0_grp,
+ uart0_1_grp, uart0_2_grp, uart0_3_grp,
+ uart0_4_grp, uart0_5_grp, uart0_6_grp,
+ uart0_7_grp, uart0_8_grp, uart0_9_grp,
+ uart0_10_grp, uart0_11_grp, uart0_12_grp,
+ uart0_13_grp, uart0_14_grp, uart0_15_grp,
+ uart0_16_grp, uart0_17_grp, uart0_18_grp,
+ uart1_0_grp, uart1_1_grp, uart1_2_grp,
+ uart1_3_grp, uart1_4_grp, uart1_5_grp,
+ uart1_6_grp, uart1_7_grp, uart1_8_grp,
+ uart1_9_grp, uart1_10_grp, uart1_11_grp,
+ uart1_12_grp, uart1_13_grp, uart1_14_grp,
+ uart1_15_grp, uart1_16_grp, uart1_17_grp,
+ uart1_18_grp, i2c0_0_grp, i2c0_1_grp,
+ i2c0_2_grp, i2c0_3_grp, i2c0_4_grp,
+ i2c0_5_grp, i2c0_6_grp, i2c0_7_grp,
+ i2c0_8_grp, i2c0_9_grp, i2c0_10_grp,
+ i2c0_11_grp, i2c0_12_grp, i2c0_13_grp,
+ i2c0_14_grp, i2c0_15_grp, i2c0_16_grp,
+ i2c0_17_grp, i2c0_18_grp, i2c1_0_grp,
+ i2c1_1_grp, i2c1_2_grp, i2c1_3_grp,
+ i2c1_4_grp, i2c1_5_grp, i2c1_6_grp,
+ i2c1_7_grp, i2c1_8_grp, i2c1_9_grp,
+ i2c1_10_grp, i2c1_11_grp, i2c1_12_grp,
+ i2c1_13_grp, i2c1_14_grp, i2c1_15_grp,
+ i2c1_16_grp, i2c1_17_grp, i2c1_18_grp,
+ i2c1_19_grp, ttc0_clk_0_grp, ttc0_wav_0_grp,
+ ttc0_clk_1_grp, ttc0_wav_1_grp, ttc0_clk_2_grp,
+ ttc0_wav_2_grp, ttc0_clk_3_grp, ttc0_wav_3_grp,
+ ttc0_clk_4_grp, ttc0_wav_4_grp, ttc0_clk_5_grp,
+ ttc0_wav_5_grp, ttc0_clk_6_grp, ttc0_wav_6_grp,
+ ttc0_clk_7_grp, ttc0_wav_7_grp, ttc0_clk_8_grp,
+ ttc0_wav_8_grp, ttc1_clk_0_grp, ttc1_wav_0_grp,
+ ttc1_clk_1_grp, ttc1_wav_1_grp, ttc1_clk_2_grp,
+ ttc1_wav_2_grp, ttc1_clk_3_grp, ttc1_wav_3_grp,
+ ttc1_clk_4_grp, ttc1_wav_4_grp, ttc1_clk_5_grp,
+ ttc1_wav_5_grp, ttc1_clk_6_grp, ttc1_wav_6_grp,
+ ttc1_clk_7_grp, ttc1_wav_7_grp, ttc1_clk_8_grp,
+ ttc1_wav_8_grp, ttc2_clk_0_grp, ttc2_wav_0_grp,
+ ttc2_clk_1_grp, ttc2_wav_1_grp, ttc2_clk_2_grp,
+ ttc2_wav_2_grp, ttc2_clk_3_grp, ttc2_wav_3_grp,
+ ttc2_clk_4_grp, ttc2_wav_4_grp, ttc2_clk_5_grp,
+ ttc2_wav_5_grp, ttc2_clk_6_grp, ttc2_wav_6_grp,
+ ttc2_clk_7_grp, ttc2_wav_7_grp, ttc2_clk_8_grp,
+ ttc2_wav_8_grp, ttc3_clk_0_grp, ttc3_wav_0_grp,
+ ttc3_clk_1_grp, ttc3_wav_1_grp, ttc3_clk_2_grp,
+ ttc3_wav_2_grp, ttc3_clk_3_grp, ttc3_wav_3_grp,
+ ttc3_clk_4_grp, ttc3_wav_4_grp, ttc3_clk_5_grp,
+ ttc3_wav_5_grp, ttc3_clk_6_grp, ttc3_wav_6_grp,
+ ttc3_clk_7_grp, ttc3_wav_7_grp, ttc3_clk_8_grp,
+ ttc3_wav_8_grp, swdt0_clk_0_grp, swdt0_rst_0_grp,
+ swdt0_clk_1_grp, swdt0_rst_1_grp, swdt0_clk_2_grp,
+ swdt0_rst_2_grp, swdt0_clk_3_grp, swdt0_rst_3_grp,
+ swdt0_clk_4_grp, swdt0_rst_4_grp, swdt0_clk_5_grp,
+ swdt0_rst_5_grp, swdt0_clk_6_grp, swdt0_rst_6_grp,
+ swdt0_clk_7_grp, swdt0_rst_7_grp, swdt0_clk_8_grp,
+ swdt0_rst_8_grp, swdt0_clk_9_grp, swdt0_rst_9_grp,
+ swdt0_clk_10_grp, swdt0_rst_10_grp, swdt0_clk_11_grp,
+ swdt0_rst_11_grp, swdt0_clk_12_grp, swdt0_rst_12_grp,
+ swdt1_clk_0_grp, swdt1_rst_0_grp, swdt1_clk_1_grp,
+ swdt1_rst_1_grp, swdt1_clk_2_grp, swdt1_rst_2_grp,
+ swdt1_clk_3_grp, swdt1_rst_3_grp, swdt1_clk_4_grp,
+ swdt1_rst_4_grp, swdt1_clk_5_grp, swdt1_rst_5_grp,
+ swdt1_clk_6_grp, swdt1_rst_6_grp, swdt1_clk_7_grp,
+ swdt1_rst_7_grp, swdt1_clk_8_grp, swdt1_rst_8_grp,
+ swdt1_clk_9_grp, swdt1_rst_9_grp, swdt1_clk_10_grp,
+ swdt1_rst_10_grp, swdt1_clk_11_grp, swdt1_rst_11_grp,
+ swdt1_clk_12_grp, swdt1_rst_12_grp, gpio0_0_grp,
+ gpio0_1_grp, gpio0_2_grp, gpio0_3_grp,
+ gpio0_4_grp, gpio0_5_grp, gpio0_6_grp,
+ gpio0_7_grp, gpio0_8_grp, gpio0_9_grp,
+ gpio0_10_grp, gpio0_11_grp, gpio0_12_grp,
+ gpio0_13_grp, gpio0_14_grp, gpio0_15_grp,
+ gpio0_16_grp, gpio0_17_grp, gpio0_18_grp,
+ gpio0_19_grp, gpio0_20_grp, gpio0_21_grp,
+ gpio0_22_grp, gpio0_23_grp, gpio0_24_grp,
+ gpio0_25_grp, gpio0_26_grp, gpio0_27_grp,
+ gpio0_28_grp, gpio0_29_grp, gpio0_30_grp,
+ gpio0_31_grp, gpio0_32_grp, gpio0_33_grp,
+ gpio0_34_grp, gpio0_35_grp, gpio0_36_grp,
+ gpio0_37_grp, gpio0_38_grp, gpio0_39_grp,
+ gpio0_40_grp, gpio0_41_grp, gpio0_42_grp,
+ gpio0_43_grp, gpio0_44_grp, gpio0_45_grp,
+ gpio0_46_grp, gpio0_47_grp, gpio0_48_grp,
+ gpio0_49_grp, gpio0_50_grp, gpio0_51_grp,
+ gpio0_52_grp, gpio0_53_grp, gpio0_54_grp,
+ gpio0_55_grp, gpio0_56_grp, gpio0_57_grp,
+ gpio0_58_grp, gpio0_59_grp, gpio0_60_grp,
+ gpio0_61_grp, gpio0_62_grp, gpio0_63_grp,
+ gpio0_64_grp, gpio0_65_grp, gpio0_66_grp,
+ gpio0_67_grp, gpio0_68_grp, gpio0_69_grp,
+ gpio0_70_grp, gpio0_71_grp, gpio0_72_grp,
+ gpio0_73_grp, gpio0_74_grp, gpio0_75_grp,
+ gpio0_76_grp, gpio0_77_grp, usb0_0_grp,
+ usb1_0_grp, pmu0_0_grp, pmu0_1_grp,
+ pmu0_2_grp, pmu0_3_grp, pmu0_4_grp,
+ pmu0_5_grp, pmu0_6_grp, pmu0_7_grp,
+ pmu0_8_grp, pmu0_9_grp, pmu0_10_grp,
+ pmu0_11_grp, pcie0_0_grp, pcie0_1_grp,
+ pcie0_2_grp, pcie0_3_grp, pcie0_4_grp,
+ pcie0_5_grp, pcie0_6_grp, pcie0_7_grp,
+ csu0_0_grp, csu0_1_grp, csu0_2_grp,
+ csu0_3_grp, csu0_4_grp, csu0_5_grp,
+ csu0_6_grp, csu0_7_grp, csu0_8_grp,
+ csu0_9_grp, csu0_10_grp, csu0_11_grp,
+ dpaux0_0_grp, dpaux0_1_grp, dpaux0_2_grp,
+ dpaux0_3_grp, pjtag0_0_grp, pjtag0_1_grp,
+ pjtag0_2_grp, pjtag0_3_grp, pjtag0_4_grp,
+ pjtag0_5_grp, trace0_0_grp, trace0_clk_0_grp,
+ trace0_1_grp, trace0_clk_1_grp, trace0_2_grp,
+ trace0_clk_2_grp, testscan0_0_grp
+
+ Valid values for pins are:
+ MIO0 - MIO77
+
+ Valid values for function are:
+ ethernet0, ethernet1, ethernet2, ethernet3, gemtsu0, usb0, usb1, mdio0,
+ mdio1, mdio2, mdio3, qspi0, qspi_fbclk, qspi_ss, spi0, spi1, spi0_ss,
+ spi1_ss, sdio0, sdio0_pc, sdio0_wp, sdio0_cd, sdio1, sdio1_pc, sdio1_wp,
+ sdio1_cd, nand0, nand0_ce, nand0_rb, nand0_dqs, can0, can1, uart0, uart1,
+ i2c0, i2c1, ttc0_clk, ttc0_wav, ttc1_clk, ttc1_wav, ttc2_clk, ttc2_wav,
+ ttc3_clk, ttc3_wav, swdt0_clk, swdt0_rst, swdt1_clk, swdt1_rst, gpio0, pmu0,
+ pcie0, csu0, dpaux0, pjtag0, trace0, trace0_clk, testscan0
+
+The following driver-specific properties as defined here are valid to specify in
+a pin configuration subnode:
+ - io-standard: Configure the pin to use the selected IO standard. Valid
+ arguments are 'IO_STANDARD_LVCMOS33' and 'IO_STANDARD_LVCMOS18'.
+ - schmitt-cmos: Selects either Schmitt or CMOS input for MIO pins. Valid
+ arguments are 'PIN_INPUT_TYPE_SCHMITT' and 'PIN_INPUT_TYPE_CMOS'.
+
+Example:
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ pinctrl0: pinctrl {
+ compatible = "xlnx,zynqmp-pinctrl";
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ schmitt-cmos = <PIN_INPUT_TYPE_CMOS>;
+ };
+ };
+ };
+ };
+};
+
+uart1 {
+ ...
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+ ...
+
+};
diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
index d366f1eb623a..450f3a41c717 100644
--- a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
+++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt
@@ -8,9 +8,27 @@ Required properties:
- compatible: Must contain: "xlnx,zynqmp-power"
- interrupts: Interrupt specifier
--------
-Example
--------
+Optional properties:
+ - mbox-names : Name given to channels seen in the 'mboxes' property.
+ "rx" - Mailbox corresponding to receive path
+ "tx" - Mailbox corresponding to transmit path
+ - mboxes : Standard property to specify a Mailbox. Each value of
+ the mboxes property should contain a phandle to the
+ mailbox controller device node and an args specifier
+ that will be the phandle to the intended sub-mailbox
+ child node to be used for communication. See
+ Documentation/devicetree/bindings/mailbox/mailbox.txt
+ for more details about the generic mailbox controller
+ and client driver bindings. Also see
+ Documentation/devicetree/bindings/mailbox/ \
+                     xlnx,zynqmp-ipi-mailbox.txt for the typical controller
+                     that is used to communicate with this system controller.
+
+--------
+Examples
+--------
+
+Example with interrupt method:
firmware {
zynqmp_firmware: zynqmp-firmware {
@@ -23,3 +41,20 @@ firmware {
};
};
};
+
+Example with IPI mailbox method:
+
+firmware {
+
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ zynqmp_power: zynqmp-power {
+ compatible = "xlnx,zynqmp-power";
+ mboxes = <&ipi_mailbox_pmu0 0>,
+ <&ipi_mailbox_pmu0 1>;
+ mbox-names = "tx", "rx";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt b/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
new file mode 100644
index 000000000000..de28128c4e5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
@@ -0,0 +1,135 @@
+Xilinx ARM Cortex A53-R5 remoteproc driver
+==========================================
+
+The ZynqMP family of devices uses two Cortex-R5 processors to help with
+various low power / real time tasks.
+
+This driver requires a specific ZynqMP hardware design.
+
+ZynqMP R5 Device Node:
+=================================
+A ZynqMP R5 device node is used to represent the RPU domain
+within the ZynqMP SoC. This device node contains RPU processor
+subnodes.
+
+Required Properties:
+--------------------
+ - compatible : Should be "xlnx,zynqmp-r5-remoteproc-1.0"
+ - core_conf : R5 core configuration (valid string - split or lock-step)
+ - interrupts : Interrupt mapping for remoteproc IPI. It is required if the
+ user uses the remoteproc driver with the RPMsg kernel driver.
+ - interrupt-parent : Phandle for the interrupt controller. It is required if
+                      the user uses the remoteproc driver with the RPMsg
+                      kernel driver.
+
+ZynqMP R5 Remoteproc Device Node:
+=================================
+A ZynqMP R5 Remoteproc device node is used to represent an RPU processor.
+It is a subnode to the ZynqMP R5 device node. It also contains tightly
+coupled memory (TCM) subnodes.
+
+Required Properties:
+--------------------
+ - pnode-id: ZynqMP R5 processor power domain ID which will be used by the
+             ZynqMP power management unit to identify the processor.
+
+Optional Properties:
+--------------------
+ - memory-region: reserved memory which will be used by the R5 processor
+ - mboxes: Specify tx and rx mailboxes
+ - mbox-names: List of identifier strings for tx/rx mailbox channels.
+
+ZynqMP R5 TCM Device Node:
+=================================
+The ZynqMP R5 TCM device node is used to represent the TCM memory.
+It is a subnode to the ZynqMP R5 processor.
+
+Required Properties:
+--------------------
+ - reg: TCM address range
+ - pnode-id: TCM power domain ID
+
+
+Example:
+--------
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ /* R5 0 firmware memory in DDR */
+		rproc_0_fw_reserved: rproc@3ed00000 {
+ no-map;
+ reg = <0x0 0x3ed00000 0x0 0x40000>;
+ };
+ /* DMA shared memory between APU and RPU */
+		rproc_0_dma_reserved: rproc@3ed40000 {
+ compatible = "shared-dma-pool";
+ no-map;
+ reg = <0x0 0x3ed40000 0x0 0x100000>;
+ };
+ };
+
+ zynqmp-r5-remoteproc@0 {
+ compatible = "xlnx,zynqmp-r5-remoteproc-1.0";
+ core_conf = "split";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ r5-0: r5@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ memory-region = <&rproc_0_fw_reserved>,
+ <&rproc_0_dma_reserved>;
+ pnode-id = <0x7>;
+ mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
+ mbox-names = "tx", "rx";
+			tcm-a: tcm@0 {
+				reg = <0x0 0xFFE00000 0x0 0x10000>;
+				pnode-id = <0xf>;
+			};
+			tcm-b: tcm@1 {
+				reg = <0x0 0xFFE20000 0x0 0x10000>;
+				pnode-id = <0x10>;
+			};
+ };
+ } ;
+
+ zynqmp_ipi {
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 29 4>;
+ xlnx,ipi-id = <7>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* APU<->RPU0 IPI mailbox controller */
+		ipi_mailbox_rpu0: mailbox@ff990600 {
+ reg = <0xff990600 0x20>,
+ <0xff990620 0x20>,
+ <0xff9900c0 0x20>,
+ <0xff9900e0 0x20>;
+ reg-names = "local_request_region",
+ "local_response_region",
+ "remote_request_region",
+ "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt b/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt
new file mode 100644
index 000000000000..1f6a2d729a5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt
@@ -0,0 +1,36 @@
+Xilinx ARM Cortex-A9 remoteproc driver
+======================================
+
+The Zynq family of devices can use one Cortex-A9 processor to help with
+various low power / real time tasks.
+
+This driver requires a specific Zynq hardware design.
+
+Zynq RemoteProc Device Node:
+=================================
+A zynq_remoteproc device node is used to represent the second Cortex-A9
+instance within the Zynq SoC.
+
+Required properties:
+--------------------
+ - compatible : should be "xlnx,zynq_remoteproc"
+ - vring0: soft interrupt for kicking from firmware
+ - vring1: soft interrupt for kicking from Linux kernel
+ - srams: firmware memories
+
+Example:
+--------
+
+ amba {
+		elf_ddr_0: ddr@100000 {
+ compatible = "mmio-sram";
+ reg = <0x100000 0x80000>;
+ };
+ };
+
+ zynq_remoteproc@0 {
+ compatible = "xlnx,zynq_remoteproc";
+ vring0 = <15>;
+ vring1 = <14>;
+ srams = <&elf_ddr_0>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/uartlite.c b/Documentation/devicetree/bindings/serial/uartlite.c
new file mode 100644
index 000000000000..7ae900880d30
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/uartlite.c
@@ -0,0 +1,26 @@
+Xilinx AXI Uartlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be either of
+ "xlnx,xps-uartlite-1.00.a"
+ "xlnx,opb-uartlite-1.00.b"
+- reg		: Physical base address and size of the AXI Uartlite
+ registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-parent : Must be core interrupt controller.
+
+Optional properties:
+- port-number	: Set UART port number
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+
+Example:
+serial@800C0000 {
+ compatible = "xlnx,xps-uartlite-1.00.a";
+ reg = <0x0 0x800c0000 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x6e 0x1>;
+ port-number = <0>;
+};
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
new file mode 100644
index 000000000000..b1c1466a34ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
@@ -0,0 +1,23 @@
+Xilinx AI Engine NPI
+--------------------
+
+The Xilinx AI Engine NPI space is where the privileged operations for the AI
+Engine device are handled, such as reset and PLL configuration. The space is
+typically meant to be owned by platform management software, and it is
+accessible only when the platform management software grants access. Thus,
+this DT binding only works in such a configuration; if the platform locks the
+access, non-secure software will fail to access the device.
+
+This is a temporary solution to allow direct access to NPI space.
+
+Required properties:
+
+- compatible: Must be "xlnx,ai-engine-npi"
+- reg: Physical base address and length of the registers set for the device.
+
+Example:
+
+ aie-npi@f70a0000 {
+ compatible = "xlnx,ai-engine-npi";
+ reg = <0x0 0xf70a0000 0x0 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt
new file mode 100644
index 000000000000..04244a6bc2da
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt
@@ -0,0 +1,28 @@
+Xilinx AI Engine
+----------------
+
+The Xilinx AI Engine is a tile processor with many cores (up to 400) that
+can run in parallel. The data routing between cores is configured through
+internal switches, and shim tiles interface with external interconnect, such
+as memory or PL.
+
+Required properties:
+
+- compatible: Must be "xlnx,ai_engine".
+- reg: Physical base address and length of the registers set for the device.
+- interrupt-parent: the phandle to the interrupt controller.
+- interrupts: the interrupt numbers.
+- interrupt-names: Should be "interrupt0", "interrupt1", "interrupt2" or
+ "interrupt3".
+
+Example:
+
+ ai_engine@80000000 {
+ compatible = "xlnx,ai_engine";
+ reg = <0x0 0x80000000 0x0 0x20000000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x94 0x1>,
+ <0x0 0x95 0x1>,
+ <0x0 0x96 0x1>;
+ interrupt-names = "interrupt1", "interrupt2", "interrupt3";
+ };
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
index 6786d6715df0..98474f2accca 100644
--- a/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
@@ -16,16 +16,60 @@ Required properties:
1. vcu slcr
2. Logicore
reg-names should contain a name for each register sequence.
-- clocks: phandle for aclk and pll_ref clocksource
-- clock-names: The identification string, "aclk", is always required for
- the axi clock. "pll_ref" is required for pll.
+- #clock-cells : Must be 1
+- clocks: phandle for aclk, pll_ref and encoder/decoder clocksources
+- clock-names: The identification string,
+ * "aclk", is always required for the axi clock.
+ * "pll_ref" is required for pll.
+ * "vcu_core_enc" is required for VCU core encoder.
+ * "vcu_core_dec" is required for VCU core decoder.
+ * "vcu_mcu_enc" is required for MCU core encoder.
+ * "vcu_mcu_dec" is required for MCU core decoder.
+- ranges : Standard ranges property; required to map the child node
+	    register spaces.
+- The VCU init driver node defines the following child nodes:
+ * Allegro encoder driver node
+ - compatible: Must be "al,al5e"
+	- reg: There is one set of registers.
+ - interrupts: interrupt number to the cpu.
+ - interrupt-parent: the phandle for the interrupt controller
+ that services interrupts for this device.
+ * Allegro decoder driver node
+ - compatible: Must be "al,al5d"
+	- reg: There is one set of registers.
+ - interrupts: interrupt number to the cpu.
+ - interrupt-parent: the phandle for the interrupt controller
+ that services interrupts for this device.
+
+Optional properties:
+- reset-gpios : The GPIO used to reset the VCU, if available. Use this reset
+		GPIO when 'vcu_resetn' is driven by a GPIO in the design. See
+		Documentation/devicetree/bindings/gpio/gpio.txt for details.
+
Example:
xlnx_vcu: vcu@a0040000 {
compatible = "xlnx,vcu-logicoreip-1.0";
+ #address-cells = <2>;
+ #size-cells = <2>;
reg = <0x0 0xa0040000 0x0 0x1000>,
<0x0 0xa0041000 0x0 0x1000>;
reg-names = "vcu_slcr", "logicore";
- clocks = <&si570_1>, <&clkc 71>;
- clock-names = "pll_ref", "aclk";
+ reset-gpios = <&gpio 0x4e GPIO_ACTIVE_HIGH>;
+ #clock-cells = <0x1>;
+ clock-names = "pll_ref", "aclk", "vcu_core_enc", "vcu_core_dec", "vcu_mcu_enc", "vcu_mcu_dec";
+ clocks = <&si570_1>, <&clkc 71>, <&xlnx_vcu 1>, <&xlnx_vcu 2>, <&xlnx_vcu 3>, <&xlnx_vcu 4>;
+ ranges;
+ encoder: al5e@a0000000 {
+ compatible = "al,al5e";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ interrupts = <0 89 4>;
+ interrupt-parent = <&gic>;
+ };
+
+ decoder: al5d@a0020000 {
+ compatible = "al,al5d";
+ reg = <0x0 0xa0020000 0x0 0x10000>;
+ interrupts = <0 89 4>;
+ interrupt-parent = <&gic>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
index cbc93c8f4963..6b5e4f762268 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
@@ -14,7 +14,7 @@ Required properties:
- interrupts: List of Interrupt numbers.
- reg: Base address and size of the IP core instance.
- clock-names: List of input clocks.
- Required elements: "s_axi_lite_aclk", "aud_mclk"
+ Required elements: "s_axi_lite_aclk", "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk"
- clocks: Input clock specifier. Refer to common clock bindings.
Example:
@@ -24,6 +24,6 @@ Example:
interrupt-parent = <&gic>;
interrupts = <0 104 4>, <0 105 4>;
reg = <0x0 0x80010000 0x0 0x1000>;
- clock-names = "s_axi_lite_aclk", "aud_mclk";
- clocks = <&clk 71>, <&clk_wiz_1 0>;
+ clock-names = "s_axi_lite_aclk", "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&audio_ss_0_clk_wiz_0 0>, <&clk 71>;
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt
new file mode 100644
index 000000000000..7eb932913983
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt
@@ -0,0 +1,17 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Audio Card
+
+The card driver integrates codec and pcm components and represents as a single
+audio device.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-card".
+ - xlnx,dp-snd-pcm: phandle(s) to the ZynqMP DP PCM node.
+ - xlnx,dp-snd-codec: phandle to the ZynqMP DP codec node.
+
+Example:
+
+ xlnx_dp_snd_card: dp_snd_card {
+ compatible = "xlnx,dp-snd-card";
+ xlnx,dp-snd-pcm = <&xlnx_dp_snd_pcm0>, <&xlnx_dp_snd_pcm1>;
+ xlnx,dp-snd-codec = <&xlnx_dp_snd_codec0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt
new file mode 100644
index 000000000000..d094fdd9d9e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Codec
+
+The codec driver handles the audio clock and format management.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-codec".
+ - clocks: The phandle for the audio clock. The audio clock should be
+ configured to the correct audio clock rate, which should be one of
+ (44100 * 512) or (48000 * 512).
+ - clock-names: The identification string should be "aud_clk".
+
+Example:
+
+ xlnx_dp_snd_codec0: dp_snd_codec0 {
+ compatible = "xlnx,dp-snd-codec";
+ clocks = <&dp_aud_clk>;
+ clock-names = "aud_clk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt
new file mode 100644
index 000000000000..303232a2a375
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort PCM
+
+The DPDMA driver of ZynqMP DisplayPort subsystem is based on DMA engine,
+and the DP PCM driver is based on snd dmaengine helpers.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-pcm".
+ - dmas: the phandle list of DMA specifiers. The dma channel ID should be one
+ of 4 for audio0 channel or 5 for audio1 channel.
+ - dma-names: the identifier strings for DMAs. The value should be "tx".
+
+Example:
+
+ xlnx_dp_snd_pcm0: dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
index 5e7c7d5bb60a..86b727c88bf9 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
@@ -11,18 +11,32 @@ Required property common to both I2S playback and capture:
- xlnx,dwidth: sample data width. Can be any of 16, 24.
- xlnx,num-channels: Number of I2S streams. Can be any of 1, 2, 3, 4.
supported channels = 2 * xlnx,num-channels
+ - xlnx,snd-pcm: reference to audio formatter block
+ - clock-names: List of input clocks.
+ Required elements for I2S Tx: "s_axi_ctrl_aclk", "aud_mclk", "s_axis_aud_aclk".
+ Required elements for I2S Rx: "s_axi_ctrl_aclk", "aud_mclk", "m_axis_aud_aclk".
+ - clocks: Input clock specifier. Refer to common clock bindings.
Example:
i2s_receiver@a0080000 {
compatible = "xlnx,i2s-receiver-1.0";
+ clock-names = "s_axi_ctrl_aclk", "aud_mclk", "m_axis_aud_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&clk 71>;
reg = <0x0 0xa0080000 0x0 0x10000>;
xlnx,dwidth = <0x18>;
xlnx,num-channels = <1>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
};
i2s_transmitter@a0090000 {
compatible = "xlnx,i2s-transmitter-1.0";
+ clock-names = "s_axi_ctrl_aclk", "aud_mclk", "s_axis_aud_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&audio_ss_0_clk_wiz_0 0>;
reg = <0x0 0xa0090000 0x0 0x10000>;
xlnx,dwidth = <0x18>;
xlnx,num-channels = <1>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
};
+	The "audio_ss_0_audio_formatter_0" node is documented at
+	Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+
diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
index 15c2d64d247c..59bfa6bcb566 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
@@ -5,7 +5,8 @@ The IP supports playback and capture of SPDIF audio
Required properties:
- compatible: "xlnx,spdif-2.0"
- clock-names: List of input clocks.
- Required elements: "s_axi_aclk", "aud_clk_i"
+ Required elements for SPDIF Tx: "aud_clk_i", "s_axi_aclk", "s_axis_aclk".
+ Required elements for SPDIF Rx: "aud_clk_i", "s_axi_aclk", "m_axis_aclk".
- clocks: Input clock specifier. Refer to common clock bindings.
- reg: Base address and address length of the IP core instance.
- interrupts-parent: Phandle for interrupt controller.
@@ -14,10 +15,10 @@ Required properties:
1 :- transmitter mode
- xlnx,aud_clk_i: input audio clock value.
-Example:
+Example - SPDIF Rx:
spdif_0: spdif@80010000 {
- clock-names = "aud_clk_i", "s_axi_aclk";
- clocks = <&misc_clk_0>, <&clk 71>;
+ clock-names = "aud_clk_i", "s_axi_aclk", "m_axis_aclk";
+ clocks = <&si570_1>, <&clk 71>, <&clk 71>;
compatible = "xlnx,spdif-2.0";
interrupt-names = "spdif_interrupt";
interrupt-parent = <&gic>;
diff --git a/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt b/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
new file mode 100644
index 000000000000..69134458b9d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
@@ -0,0 +1,60 @@
+Device-Tree bindings for Xilinx SDI audio
+
+The IP core supports embed/extract of audio in SDI Tx and Rx
+protocol respectively. Reference to PG:
+https://www.xilinx.com/support/documentation/ip_documentation/v_uhdsdi_audio/v1_0/pg309-v-uhdsdi-audio.pdf
+
+Required properties:
+ - compatible: Should be one of:
+ "xlnx,v-uhdsdi-audio-2.0"
+ "xlnx,v-uhdsdi-audio-1.0"
+		Note: v1.0 (xlnx,v-uhdsdi-audio-1.0) is deprecated and the
+		driver no longer supports it. Upgrading to v2.0 is mandatory.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for the interrupt controller.
+ - reg: Base address and size of the IP core instance.
+ - xlnx,snd-pcm: reference to audio formatter block
+ - clock-names: List of input clocks.
+ Required elements for SDI Embed: "s_axi_aclk", "s_axis_clk", "sdi_embed_clk".
+ Required elements for SDI Extract: "s_axi_aclk", "sdi_extract_clk", "m_axis_clk".
+ - clocks: Input clock specifier. Refer to common clock bindings.
+
+SDI embed contains an output port to the remote endpoint of the SDI video Tx
+node. This pipeline should be described using the DT bindings defined in
+Documentation/devicetree/bindings/graph.txt
+
+Example:
+
+ audio_ss_0_v_uhdsdi_audio_extract_0: v_uhdsdi_audio@80080000 {
+ compatible = "xlnx,v-uhdsdi-audio-2.0";
+ clock-names = "s_axi_aclk", "sdi_extract_clk", "m_axis_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_0>;
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 106 4>;
+ reg = <0x0 0x80080000 0x0 0x10000>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
+ };
+
+ audio_ss_0_v_uhdsdi_audio_embed_0: v_uhdsdi_audio@80090000 {
+ compatible = "xlnx,v-uhdsdi-audio-2.0";
+ clock-names = "s_axi_aclk", "s_axis_clk", "sdi_embed_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_0>, <&misc_clk_1>;
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 107 4>;
+ reg = <0x0 0x80090000 0x0 0x10000>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
+ sdi_av_port: port@0 {
+ reg = <0>;
+ sditx_audio_embed_src: endpoint {
+ remote-endpoint = <&sdi_audio_sink_port>;
+ };
+ };
+ };
+
+ Node 'v_smpte_uhdsdi_tx_ss' is documented in SDI Tx video bindings,
+ located at Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt.
+
+	The 'audio_ss_0_audio_formatter_0' node is documented
+	at Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index dc924a5f71db..9a9af55055f5 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -6,16 +6,27 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
+- fifo-size		: Depth of the TX/RX FIFOs
Optional properties:
-- xlnx,num-ss-bits : Number of chip selects used.
+- num-cs : Number of chip selects used.
+- bits-per-word : Number of bits per word.
+- clock-names : Can be one or more strings from "axi_clk", "axi4_clk"
+ and "spi_clk" depending on IP configurations.
+- clocks : Input clock specifier. Refer to common clock bindings.
+- xlnx,startup-block	: Boolean property indicating that the startup block
+			  is enabled in the IP.
Example:
axi_quad_spi@41e00000 {
compatible = "xlnx,xps-spi-2.00.a";
+ clock-names = "axi_clk", "axi4_clk", "spi_clk";
+ clocks = <&clkc 71>, <&clkc 72>, <&clkc 73>;
interrupt-parent = <&intc>;
interrupts = <0 31 1>;
reg = <0x41e00000 0x10000>;
- xlnx,num-ss-bits = <0x1>;
+ num-cs = <0x1>;
+ fifo-size = <256>;
+ bits-per-word = <8>;
+ xlnx,startup-block;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
index 0f6d37ff541c..a40827f58164 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
@@ -2,7 +2,8 @@ Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
-------------------------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,zynqmp-qspi-1.0".
+- compatible : Should be "xlnx,zynqmp-qspi-1.0" for zynqmp or
+ "xlnx,versal-qspi-1.0" for versal.
- reg : Physical base address and size of GQSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
@@ -12,6 +13,14 @@ Required properties:
Optional properties:
- num-cs : Number of chip selects used.
+- has-io-mode	: Boolean property describing the controller operating
+		  mode. If present, the controller operates in IO mode,
+		  otherwise in DMA mode.
+- is-dual	: ZynqMP QSPI support for the dual-parallel mode
+		  configuration; the value should be 1.
+- is-stacked	: ZynqMP QSPI support for the stacked mode configuration.
+		  To enable this mode, is-dual should be 0 and is-stacked
+		  should be 1.
Example:
qspi: spi@ff0f0000 {
diff --git a/Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt b/Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt
new file mode 100644
index 000000000000..8dabef16d083
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/xroeframer/xroeframer.txt
@@ -0,0 +1,17 @@
+* Xilinx Radio over Ethernet Framer driver
+
+Required properties:
+- compatible: must be "xlnx,roe-framer-1.0"
+- reg: physical base address of the framer and length of memory mapped region
+- clock-names: list of clock names
+- clocks: list of clock sources corresponding to the clock names
+
+Example:
+ roe_framer@a0000000 {
+ compatible = "xlnx,roe-framer-1.0";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ clock-names = "s_axi_aclk", "m_axis_defm_aclk",
+ "s_axis_fram_aclk", "tx0_eth_port_clk",
+ "internal_bus_clk";
+ clocks = <0x43 0x44 0x44 0x45 0x45>;
+ };
diff --git a/Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt b/Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt
new file mode 100644
index 000000000000..3516d3ff8009
--- /dev/null
+++ b/Documentation/devicetree/bindings/staging/xroetrafficgen/xroetrafficgen.txt
@@ -0,0 +1,15 @@
+* Xilinx Radio over Ethernet Traffic Generator driver
+
+Required properties:
+- compatible: must be "xlnx,roe-traffic-gen-1.0"
+- reg: physical base address of the traffic generator and length of memory
+  mapped region
+- clock-names: list of clock names
+- clocks: list of clock sources corresponding to the clock names
+
+Example:
+ roe_radio_ctrl@a0060000 {
+ compatible = "xlnx,roe-traffic-gen-1.0";
+ reg = <0x0 0xa0060000 0x0 0x10000>;
+ clock-names = "s_axis_fram_aclk", "s_axi_aclk";
+ clocks = <0x44 0x43>;
+ };
diff --git a/Documentation/devicetree/bindings/uio/xilinx_apm.txt b/Documentation/devicetree/bindings/uio/xilinx_apm.txt
new file mode 100644
index 000000000000..a11c82e84b6a
--- /dev/null
+++ b/Documentation/devicetree/bindings/uio/xilinx_apm.txt
@@ -0,0 +1,44 @@
+* Xilinx AXI Performance monitor IP
+
+Required properties:
+- compatible: "xlnx,axi-perf-monitor"
+- interrupts: Should contain APM interrupts.
+- interrupt-parent: Must be core interrupt controller.
+- reg: Should contain APM registers location and length.
+- xlnx,enable-profile: Enables the profile mode.
+- xlnx,enable-trace: Enables trace mode.
+- xlnx,num-monitor-slots: Maximum number of slots in APM.
+- xlnx,enable-event-count: Enable event count.
+- xlnx,enable-event-log: Enable event logging.
+- xlnx,have-sampled-metric-cnt: Sampled metric counters enabled in APM.
+- xlnx,num-of-counters: Number of counters in APM
+- xlnx,metric-count-width: Metric Counter width (32/64)
+- xlnx,metrics-sample-count-width: Sampled metric counter width
+- xlnx,global-count-width: Global Clock counter width
+- clocks: Input clock specifier.
+
+Optional properties:
+- xlnx,id-filter-32bit: APM is in 32-bit mode
+
+Example:
+++++++++
+
+apm: apm@44a00000 {
+ compatible = "xlnx,axi-perf-monitor";
+ interrupt-parent = <&axi_intc_1>;
+ interrupts = <1 2>;
+ reg = <0x44a00000 0x1000>;
+ clocks = <&clkc 15>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <4>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ xlnx,id-filter-32bit;
+};
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
index 4aae5b2cef56..622e27fc0b71 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
@@ -1,7 +1,8 @@
Xilinx SuperSpeed DWC3 USB SoC controller
Required properties:
-- compatible: Should contain "xlnx,zynqmp-dwc3"
+- compatible: May contain "xlnx,zynqmp-dwc3" or "xlnx,versal-dwc3"
+- reg: Base address and length of the register control block
- clocks: A list of phandles for the clocks listed in clock-names
- clock-names: Should contain the following:
"bus_clk" Master/Core clock, have to be >= 125 MHz for SS
@@ -13,20 +14,38 @@ Required child node:
A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
+Optional properties for xlnx,zynqmp-dwc3:
+- nvmem-cells: list of phandle to the nvmem data cells.
+- nvmem-cell-names: Names for each of the nvmem-cells specified.
+
+Optional properties for snps,dwc3:
+- dma-coherent: Enable this flag if CCI is enabled in the design. Adding this
+		flag configures the Global SoC bus Configuration Register and
+		the Xilinx USB 3.0 IP USB coherency register to enable CCI.
+- snps,enable-hibernation: Add this flag to enable hibernation support for
+ peripheral mode
+- interrupt-names: This property provides the names of the interrupt ids used
+
Example device node:
usb@0 {
#address-cells = <0x2>;
#size-cells = <0x1>;
compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
	clock-names = "bus_clk", "ref_clk";
clocks = <&clk125>, <&clk125>;
ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
dwc3@fe200000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe200000 0x40000>;
+		interrupt-names = "dwc_usb3";
interrupts = <0x0 0x41 0x4>;
dr_mode = "host";
+ dma-coherent;
+		snps,enable-hibernation;
};
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 14d97c40b75c..81df8753ff15 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -85,6 +85,19 @@ Optional properties:
- snps,quirk-frame-length-adjustment: Value for GFLADJ_30MHZ field of GFLADJ
register for post-silicon frame length adjustment when the
fladj_30mhz_sdbnd signal is invalid or incorrect.
+ - snps,refclk_fladj: Enable frame length adjustment for SOF/ITP counter.
+ - snps,enable_guctl1_resume_quirk: Adding this flag sets bit 10 of GUCTL1
+ thus enabling the workaround in HW to fix the issue where the controller
+ was not able to generate correct CRC checksum on the very first transfer
+ packet after sending resume signal.
+ - snps,enable_guctl1_ipd_quirk: Adding this flag sets bit 9 of GUCTL1
+ enabling the workaround in HW to reduce the Inter Packet Delay (IPD)
+ and making controller enumerate FS/LS devices connected behind VIA-LAB.
+ - snps,xhci-stream-quirk: DWC3 host controller has a bug where it sometimes
+	fails to process the transfer descriptors present in the BULK IN
+	stream ring. Since the controller is not processing any TD, no transfer
+	events will be triggered, resulting in a hang condition. Enabling this
+	flag in the DTS works around this issue.
- snps,rx-thr-num-pkt-prd: periodic ESS RX packet threshold count - host mode
only. Set this and rx-max-burst-prd to a valid,
non-zero value 1-16 (DWC_usb31 programming guide
diff --git a/Documentation/devicetree/bindings/usb/ehci-xilinx.txt b/Documentation/devicetree/bindings/usb/ehci-xilinx.txt
new file mode 100644
index 000000000000..4df7ad6e3541
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ehci-xilinx.txt
@@ -0,0 +1,21 @@
+Xilinx USB EHCI controller
+
+Required properties:
+- compatible: must be "xlnx,xps-usb-host-1.00.a"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The EHCI interrupt
+
+Optional properties:
+- xlnx,ext-vbus-valid: Use external VBUS
+- xlnx,support-usb-fs: Support for Full Speed USB
+- xlnx,use-phy-bus-pwr: Use phy bus power in USB
+
+Example:
+
+ xps_usb_host_0: usb@82400000 {
+ compatible = "xlnx,xps-usb-host-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 0 2 >;
+ reg = < 0x82400000 0x200 >;
+ } ;
diff --git a/Documentation/devicetree/bindings/usb/udc-xilinx.txt b/Documentation/devicetree/bindings/usb/udc-xilinx.txt
index 47b4e397a08d..86f705384132 100644
--- a/Documentation/devicetree/bindings/usb/udc-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/udc-xilinx.txt
@@ -6,13 +6,16 @@ Required properties:
device registers map.
- interrupts : Should contain single irq line of USB2 device
controller
-- xlnx,has-builtin-dma : if DMA is included
+- xlnx,has-builtin-dma : If DMA is included
-Example:
- axi-usb2-device@42e00000 {
- compatible = "xlnx,usb2-device-4.00.a";
- interrupts = <0x0 0x39 0x1>;
- reg = <0x42e00000 0x10000>;
- xlnx,has-builtin-dma;
- };
+Optional properties:
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+Example:
+ axi-usb2-device@42e00000 {
+ compatible = "xlnx,usb2-device-4.00.a";
+ interrupts = <0x0 0x39 0x1>;
+ reg = <0x42e00000 0x10000>;
+ xlnx,has-builtin-dma;
+ };
diff --git a/Documentation/devicetree/bindings/video/xilinx-fb.txt b/Documentation/devicetree/bindings/video/xilinx-fb.txt
new file mode 100644
index 000000000000..11a6ba01a032
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/xilinx-fb.txt
@@ -0,0 +1,35 @@
+Xilinx AXI TFT controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be any of the following
+ "xlnx,xps-tft-1.00.a","xlnx,xps-tft-2.00.a",
+ "xlnx,xps-tft-2.01.a","xlnx,plb-tft-cntlr-ref-1.00.a",
+ "xlnx,plb-dvi-cntlr-ref-1.00.c"
+- reg			: Physical base address and size of the AXI TFT
+			  registers map
+- interrupts : Property with a value describing the interrupt
+ number
+- interrupt-parent : Must be core interrupt controller
+- xlnx,dcr-splb-slave-if : Selects whether the TFT controller is accessed
+			   through the bus or the DCR interface: 1 for bus
+			   (the default) and 0 for DCR.
+- resolution		 : <xres yres> pixel resolution of the framebuffer.
+			   Some implementations use a different resolution.
+- virtual-resolution : <xvirt yvirt> Size of framebuffer in memory.
+- rotate-display : (empty) rotate display 180 degrees
+- phys-size		 : <screen_width_mm screen_height_mm> width and height
+			   of the screen
+
+Example:
+axi_tft_0: axi_tft@44a00000 {
+ compatible = "xlnx,xps-tft-1.00.a";
+ interrupt-parent = <&axi_intc>;
+ interrupts = <1 0>;
+ reg = <0x44a00000 0x10000>;
+ xlnx,dcr-splb-slave-if = <0x1>;
+ resolution = <640 480>;
+ virtual-resolution = <1024 480>;
+ phys-size = <1024 512>;
+ rotate-display;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
index c6ae9c9d5e3e..10d68003158d 100644
--- a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
@@ -1,21 +1,28 @@
-Xilinx AXI/PLB soft-core watchdog Device Tree Bindings
----------------------------------------------------------
+Xilinx AXI/PLB soft-core watchdog and window watchdog Device Tree Bindings
+--------------------------------------------------------------------------
Required properties:
- compatible : Should be "xlnx,xps-timebase-wdt-1.00.a" or
- "xlnx,xps-timebase-wdt-1.01.a".
+ "xlnx,xps-timebase-wdt-1.01.a" or
+ "xlnx,versal-wwdt-1.0".
- reg : Physical base address and size
Optional properties:
- clocks : Input clock specifier. Refer to common clock
bindings.
- clock-frequency : Frequency of clock in Hz
+
+Optional properties for AXI/PLB soft-core watchdog:
- xlnx,wdt-enable-once : 0 - Watchdog can be restarted
1 - Watchdog can be enabled just once
- xlnx,wdt-interval : Watchdog timeout interval in 2^<val> clock cycles,
<val> is integer from 8 to 31.
+Optional properties for window watchdog:
+- timeout-sec : Watchdog timeout value (in seconds).
+
Example:
+Xilinx AXI/PLB soft-core watchdog:
axi-timebase-wdt@40100000 {
clock-frequency = <50000000>;
compatible = "xlnx,xps-timebase-wdt-1.00.a";
@@ -24,3 +31,11 @@ axi-timebase-wdt@40100000 {
xlnx,wdt-enable-once = <0x0>;
xlnx,wdt-interval = <0x1b>;
} ;
+
+Xilinx Versal window watchdog:
+watchdog@fd4d0000 {
+ compatible = "xlnx,versal-wwdt-1.0";
+ reg = <0x0 0xfd4d0000 0x0 0x10000>;
+ clocks = <&clk25>;
+ timeout-sec = <10>;
+} ;
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index d058ace29345..0c75bb153ca6 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -253,6 +253,7 @@
Optional properties:
- 8-bit (empty) : Set this property for SystemACE in 8 bit mode
+	 - port-number = <port_number> : Set the port number for a particular device
iii) Xilinx EMAC and Xilinx TEMAC
diff --git a/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt b/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt
new file mode 100644
index 000000000000..8abc053dfa30
--- /dev/null
+++ b/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt
@@ -0,0 +1,22 @@
+The Xilinx framebuffer DMA engine supports two soft IP blocks: one IP
+block is used for reading video frame data from memory (FB Read) to the device
+and the other IP block is used for writing video frame data from the device
+to memory (FB Write). Both the FB Read/Write IP blocks are aware of the
+format of the data being written to or read from memory including RGB and
+YUV in packed, planar, and semi-planar formats. Because the FB Read/Write
+is format aware, only one buffer pointer is needed by the IP blocks even
+when planar or semi-planar formats are used.
+
+Required properties:
+ - compatible: Should be "xlnx,ctrl-fbwr-1.0" for framebuffer Write OR
+ "xlnx,ctrl-fbrd-1.0" for framebuffer Read.
+ - reg: Base address and size of the IP core.
+ - reset-gpios: gpio to reset the framebuffer IP
+
+Example:
+
+	fbwr@a0000000 {
+ compatible = "xlnx,ctrl-fbwr-1.0";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ reset-gpios = <&gpio 82 1>;
+ };
diff --git a/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt b/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt
new file mode 100644
index 000000000000..04e6426f4e9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt
@@ -0,0 +1,21 @@
+The Xilinx VPSS Scaler is a Video IP that supports up scaling, down scaling and
+no scaling functionality along with color space conversion. It supports custom
+resolution values from 0 to 4096.
+
+Required properties:
+
+- compatible: Must be "xlnx,ctrl-xvpss-1.0".
+- reg: Base address and size of the IP core.
+- reset-gpios: gpio to reset the framebuffer IP
+- xlnx,vpss-taps: number of taps
+- xlnx,vpss-ppc: pixels per clock
+
+Example:
+
+	ctrlvpss: vpss@a0200000 {
+ compatible = "xlnx,ctrl-xvpss-1.0";
+ reg = <0x0 0xa0200000 0x0 0x30000>;
+ reset-gpios = <&gpio 80 1>;
+ xlnx,vpss-taps = <6>;
+ xlnx,vpss-ppc = <2>;
+ };
diff --git a/Documentation/devicetree/configfs-overlays.txt b/Documentation/devicetree/configfs-overlays.txt
new file mode 100644
index 000000000000..5fa43e064307
--- /dev/null
+++ b/Documentation/devicetree/configfs-overlays.txt
@@ -0,0 +1,31 @@
+How to use the configfs overlay interface.
+
+A device-tree configfs entry is created in /config/device-tree/overlays
+and it is manipulated using standard file system I/O.
+Note that this is a debug level interface, for use by developers and
+not necessarily something accessed by normal users due to the
+security implications of having direct access to the kernel's device tree.
+
+* To create an overlay you mkdir the directory:
+
+ # mkdir /config/device-tree/overlays/foo
+
+* Either you echo the overlay firmware file to the path property file.
+
+ # echo foo.dtbo >/config/device-tree/overlays/foo/path
+
+* Or you cat the contents of the overlay to the dtbo file
+
+ # cat foo.dtbo >/config/device-tree/overlays/foo/dtbo
+
+The overlay file will be applied, and devices will be created/destroyed
+as required.
+
+To remove it simply rmdir the directory.
+
+ # rmdir /config/device-tree/overlays/foo
+
+The rationale of the dual interface (firmware & direct copy) is that each is
+better suited to different use patterns. The firmware interface is what's
+intended to be used by hardware managers in the kernel, while the copy interface
+makes sense for developers (since it avoids problems with namespaces).
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index dfc4486b5743..94c054fb0fd6 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -191,6 +191,13 @@ Currently, the types available are:
- Used by the client drivers to register a callback that will be
called on a regular basis through the DMA controller interrupt
+- DMA_SG
+ - The device supports memory to memory scatter-gather
+ transfers.
+ - Even though a plain memcpy can look like a particular case of a
+ scatter-gather transfer, with a single chunk to transfer, it's a
+ distinct transaction type in the mem2mem transfers case
+
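+  A provider advertises this capability with ``dma_cap_set`` and a prep
+  callback. A minimal sketch, assuming the ``device_prep_dma_sg`` hook
+  that accompanies DMA_SG (the driver-side names here are made up):
+
+  .. code-block:: c
+
+      #include <linux/dmaengine.h>
+
+      /* Made-up callback: would build one descriptor covering both lists */
+      static struct dma_async_tx_descriptor *
+      xxx_prep_dma_sg(struct dma_chan *chan,
+                      struct scatterlist *dst_sg, unsigned int dst_nents,
+                      struct scatterlist *src_sg, unsigned int src_nents,
+                      unsigned long flags)
+      {
+              return NULL; /* a real driver returns a prepared descriptor */
+      }
+
+      static void xxx_register_caps(struct dma_device *dma_dev)
+      {
+              dma_cap_set(DMA_SG, dma_dev->cap_mask);
+              dma_dev->device_prep_dma_sg = xxx_prep_dma_sg;
+      }
+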
- DMA_PRIVATE
- The devices only supports slave transfers, and as such isn't
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index ab1a48a5ae80..0e27612e3d73 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1416,6 +1416,43 @@ The following tables list existing packed RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG101010-1X30:
+
+ - MEDIA_BUS_FMT_RBG101010_1X30
+ - 0x101b
+ -
+ - 0
+ - 0
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -1523,6 +1560,47 @@ The following table list existing packed 36bit wide RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG121212-1X36:
+
+ - MEDIA_BUS_FMT_RBG121212_1X36
+ - 0x101c
+ -
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -1692,6 +1770,78 @@ The following table list existing packed 48bit wide RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG161616-1X48:
+
+ - MEDIA_BUS_FMT_RBG161616_1X48
+ - 0x101d
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - r\ :sub:`15`
+ - r\ :sub:`14`
+ - r\ :sub:`13`
+ - r\ :sub:`12`
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * -
+ -
+ -
+ - b\ :sub:`15`
+ - b\ :sub:`14`
+ - b\ :sub:`13`
+ - b\ :sub:`12`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`15`
+ - g\ :sub:`14`
+ - g\ :sub:`13`
+ - g\ :sub:`12`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -5072,6 +5222,148 @@ the following codes.
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-VYYUYY10-4X20:
+
+ - MEDIA_BUS_FMT_VYYUYY10_4X20
+ - 0x2031
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-Y12-1X12:
- MEDIA_BUS_FMT_Y12_1X12
@@ -6829,6 +7121,185 @@ the following codes.
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYYVYY12-4X24:
+
+ - MEDIA_BUS_FMT_UYYVYY12_4X24
+ - 0x202d
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYYUYY8-1X24:
+
+ - MEDIA_BUS_FMT_VYYUYY8_1X24
+ - 0x202c
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-YUV10-1X30:
- MEDIA_BUS_FMT_YUV10_1X30
@@ -6866,6 +7337,43 @@ the following codes.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY10-1X30:
+
+ - MEDIA_BUS_FMT_VUY10_1X30
+ - 0x2032
+ -
+ -
+ -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-UYYVYY10-0-5X30:
- MEDIA_BUS_FMT_UYYVYY10_0_5X30
@@ -6938,6 +7446,43 @@ the following codes.
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-Y16-1X16:
+
+ - MEDIA_BUS_FMT_Y16_1X16
+      - 0x202e
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-AYUV8-1X32:
- MEDIA_BUS_FMT_AYUV8_1X32
@@ -6975,6 +7520,220 @@ the following codes.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYYVYY16-4X32:
+
+ - MEDIA_BUS_FMT_UYYVYY16_4X32
+ - 0x202f
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY16-2X32:
+
+ - MEDIA_BUS_FMT_UYVY16_2X32
+ - 0x2030
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
.. raw:: latex
@@ -7163,6 +7922,47 @@ The following table list existing packed 36bit wide YUV formats.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY12-1X36:
+
+ - MEDIA_BUS_FMT_VUY12_1X36
+ - 0x2033
+ -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
.. raw:: latex
@@ -7333,6 +8133,78 @@ The following table list existing packed 48bit wide YUV formats.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY16-1X48:
+
+ - MEDIA_BUS_FMT_VUY16_1X48
+ - 0x2034
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * -
+ -
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+      - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-UYYVYY16-0-5X48:
- MEDIA_BUS_FMT_UYYVYY16_0_5X48
diff --git a/Documentation/misc-devices/xilinx_sdfec.rst b/Documentation/misc-devices/xilinx_sdfec.rst
new file mode 100644
index 000000000000..1b459c0b5bc3
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_sdfec.rst
@@ -0,0 +1,291 @@
+====================
+Xilinx SD-FEC Driver
+====================
+
+.. toctree::
+ :maxdepth: 4
+ :caption: Table of Contents
+
+ xilinx_sdfec
+
+Overview
+========
+
+This driver supports the SD-FEC Integrated Block for Zynq |Ultrascale+ (TM)| RFSoCs.
+
+.. |Ultrascale+ (TM)| unicode:: Ultrascale+ U+2122
+ .. with trademark sign
+
+For a full description of SD-FEC core features, see the `SD-FEC Product Guide (PG256) <https://www.xilinx.com/cgi-bin/docs/ipdoc?c=sd_fec;v=latest;d=pg256-sdfec-integrated-block.pdf>`_
+
+This driver supports the following features:
+
+ - Retrieval of the Integrated Block configuration and status information
+ - Configuration of LDPC codes
+ - Configuration of Turbo decoding
+ - Monitoring errors
+
+Missing features, known issues, and limitations of the SD-FEC driver are as
+follows:
+
+ - Only allows a single open file descriptor to any instance of the driver at any time
+ - Reset of the SD-FEC Integrated Block is not controlled by this driver
+ - Does not support shared LDPC code table wraparound
+
+The device tree entry is described in:
+`linux-xlnx/Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt <https://github.com/Xilinx/linux-xlnx/blob/master/Documentation/devicetree/bindings/misc/xlnx%2Csd-fec.txt>`_
+
+
+Modes of Operation
+------------------
+
+The driver works with the SD-FEC core in two modes of operation:
+
+ - Run-time configuration
+ - Programmable Logic (PL) initialization
+
+
+Run-time Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Run-time configuration the role of driver is to allow the software application to do the following:
+
+ - Load the configuration parameters for either Turbo decode or LDPC encode or decode
+ - Activate the SD-FEC core
+ - Monitor the SD-FEC core for errors
+ - Retrieve the status and configuration of the SD-FEC core
+
+Programmable Logic (PL) Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For PL initialization, supporting logic loads configuration parameters for either
+the Turbo decode or LDPC encode or decode. The role of the driver is to allow
+the software application to do the following:
+
+ - Activate the SD-FEC core
+ - Monitor the SD-FEC core for errors
+ - Retrieve the status and configuration of the SD-FEC core
+
+
+Driver Structure
+================
+
+The driver provides a platform device where the ``probe`` and ``remove``
+operations are provided.
+
+ - probe: Updates the configuration register with device-tree entries and determines the current activation state of the core, for example, whether the core is bypassed or has been started.
+
+
+The driver defines the following driver file operations to provide user
+application interfaces:
+
+ - open: Implements the restriction that only a single file descriptor can be open per SD-FEC instance at any time
+ - release: Allows another file descriptor to be opened, that is, after the current file descriptor is closed
+ - poll: Provides a method to monitor for SD-FEC Error events
+ - unlocked_ioctl: Provides the following ioctl commands that allow the application to configure the SD-FEC core:
+
+ - :c:macro:`XSDFEC_START_DEV`
+ - :c:macro:`XSDFEC_STOP_DEV`
+ - :c:macro:`XSDFEC_GET_STATUS`
+ - :c:macro:`XSDFEC_SET_IRQ`
+ - :c:macro:`XSDFEC_SET_TURBO`
+ - :c:macro:`XSDFEC_ADD_LDPC_CODE_PARAMS`
+ - :c:macro:`XSDFEC_GET_CONFIG`
+ - :c:macro:`XSDFEC_SET_ORDER`
+ - :c:macro:`XSDFEC_SET_BYPASS`
+ - :c:macro:`XSDFEC_IS_ACTIVE`
+ - :c:macro:`XSDFEC_CLEAR_STATS`
+ - :c:macro:`XSDFEC_SET_DEFAULT_CONFIG`
+
+
+Driver Usage
+============
+
+
+Overview
+--------
+
+After opening the driver, the user should determine the configuration of the
+driver, then find out what operations need to be performed to configure and
+activate the SD-FEC core.
+The following outlines the flow the user should perform:
+
+ - Determine Configuration
+ - Set the order, if not already configured as desired
+ - Set Turbo decode or LDPC encode/decode parameters, depending on how the
+   SD-FEC core is configured, and only if the SD-FEC has not been configured
+   for PL initialization
+ - Enable interrupts, if not already enabled
+ - Bypass the SD-FEC core, if required
+ - Start the SD-FEC core if not already started
+ - Get the SD-FEC core status
+ - Monitor for interrupts
+ - Stop the SD-FEC core
+
+
+Note: When monitoring for interrupts, if a critical error is detected that requires a reset, the driver will be required to load the default configuration.
+
+
+Determine Configuration
+-----------------------
+
+Determine the configuration of the SD-FEC core by using the ioctl
+:c:macro:`XSDFEC_GET_CONFIG`.
+
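+A minimal user-space sketch of this step (the device node name and the
+installed header path are assumptions):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <misc/xilinx_sdfec.h> /* assumed install path of the uapi header */
+
+    int main(void)
+    {
+        struct xsdfec_config cfg;
+        int fd = open("/dev/xsdfec0", O_RDWR); /* assumed device node */
+
+        if (fd < 0)
+            return 1;
+        /* Determine the configuration before touching anything else */
+        if (ioctl(fd, XSDFEC_GET_CONFIG, &cfg) == 0)
+            printf("code: %d order: %d\n", cfg.code, cfg.order);
+        close(fd);
+        return 0;
+    }
+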
+Set the Order
+-------------
+
+Setting the order determines how the order of blocks can change from input to output.
+
+Setting the order is done by using the ioctl :c:macro:`XSDFEC_SET_ORDER`
+
+Setting the order can only be done if the following restrictions are met:
+
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
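+A minimal sketch, assuming the ``XSDFEC_MAINTAIN_ORDER`` enum value provided
+by the uapi header:
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <misc/xilinx_sdfec.h> /* assumed install path of the uapi header */
+
+    static int set_order(int fd)
+    {
+        unsigned long order = XSDFEC_MAINTAIN_ORDER;
+
+        return ioctl(fd, XSDFEC_SET_ORDER, &order);
+    }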
+
+Add LDPC Codes
+--------------
+
+The following steps indicate how to add LDPC codes to the SD-FEC core:
+
+ - Use the auto-generated parameters to fill the :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>` for the desired LDPC code.
+ - Set the SC, LA, and QC table offsets for the LDPC parameters and the parameters in the structure :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>`
+ - Set the desired Code Id value in the structure :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>`
+ - Add the LDPC Code Parameters using the ioctl :c:macro:`XSDFEC_ADD_LDPC_CODE_PARAMS`
+ - For the applied LDPC Code Parameters use the function :c:func:`xsdfec_calculate_shared_ldpc_table_entry_size` to calculate the size of the shared LDPC code tables. This allows the user to determine the shared table usage, so that when selecting the table offsets for the next LDPC code parameters, unused table areas can be selected.
+ - Repeat for each LDPC code parameter (see the sketch after the restrictions below).
+
+Adding LDPC codes can only be done if the following restrictions are met:
+
+ - The ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as LDPC
+ - The ``code_wr_protect`` of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates that write protection is not enabled
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not started
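+
+A hedged sketch of this flow is shown below; the offset and ID field names
+(``sc_off``, ``la_off``, ``qc_off``, ``code_id``) are assumptions based on
+the tables described above, and the authoritative layout is in the UAPI
+header. ``<stdlib.h>`` is additionally required:
+
+.. code-block:: c
+
+    struct xsdfec_ldpc_params *ldpc;
+
+    ldpc = calloc(1, sizeof(*ldpc));
+    if (!ldpc)
+        return 1;
+
+    /* 1. Copy the auto-generated code parameters into *ldpc. */
+    /* 2. Choose table offsets and a code ID (assumed field names): */
+    ldpc->sc_off = 0;
+    ldpc->la_off = 0;
+    ldpc->qc_off = 0;
+    ldpc->code_id = 0;
+
+    if (ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, ldpc) < 0)
+        perror("XSDFEC_ADD_LDPC_CODE_PARAMS");
+
+    /* 3. Use xsdfec_calculate_shared_ldpc_table_entry_size(), defined
+     *    in the UAPI header, to work out how much of the shared tables
+     *    this code consumed before picking offsets for the next one.
+     */
+    free(ldpc);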
+
+Set Turbo Decode
+----------------
+
+Configuring the Turbo decode parameters is done by using the ioctl :c:macro:`XSDFEC_SET_TURBO`, with auto-generated parameters used to fill the :c:type:`struct xsdfec_turbo <xsdfec_turbo>` for the desired Turbo code.
+
+Setting the Turbo decode parameters can only be done if the following restrictions are met:
+
+ - The ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as TURBO
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not started
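+
+A minimal sketch, continuing the earlier example (``<string.h>`` is
+additionally required; the exact field layout is taken from the UAPI header):
+
+.. code-block:: c
+
+    struct xsdfec_turbo turbo;
+
+    memset(&turbo, 0, sizeof(turbo));
+    /* Copy the auto-generated Turbo parameters into turbo here; see
+     * struct xsdfec_turbo in the UAPI header for the exact fields.
+     */
+    if (ioctl(fd, XSDFEC_SET_TURBO, &turbo) < 0)
+        perror("XSDFEC_SET_TURBO");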
+
+Enable Interrupts
+-----------------
+
+Enabling or disabling interrupts is done by using the ioctl :c:macro:`XSDFEC_SET_IRQ`. The members of the parameter, :c:type:`struct xsdfec_irq <xsdfec_irq>`, passed to the ioctl are used to set and clear different categories of interrupts. The categories of interrupt are controlled as follows:
+
+ - ``enable_isr`` controls the ``tlast`` interrupts
+ - ``enable_ecc_isr`` controls the ECC interrupts
+
+If the ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates that the SD-FEC core is configured as TURBO, then enabling the ECC interrupts is not required.
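+
+For example, to enable both categories of interrupt (a sketch continuing the
+earlier example):
+
+.. code-block:: c
+
+    struct xsdfec_irq irq;
+
+    irq.enable_isr = 1;     /* tlast interrupts */
+    irq.enable_ecc_isr = 1; /* ECC interrupts; unnecessary for Turbo */
+
+    if (ioctl(fd, XSDFEC_SET_IRQ, &irq) < 0)
+        perror("XSDFEC_SET_IRQ");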
+
+Bypass the SD-FEC
+-----------------
+
+Bypassing the SD-FEC is done by using the ioctl :c:macro:`XSDFEC_SET_BYPASS`.
+
+Bypassing the SD-FEC can only be done if the following restrictions are met:
+
+ - The ``state`` member of :c:type:`struct xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not started
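+
+A one-line sketch, assuming the ioctl takes a pointer to a boolean flag
+(``<stdbool.h>`` is additionally required):
+
+.. code-block:: c
+
+    bool bypass = true;
+
+    /* Only valid while the core has not started */
+    if (ioctl(fd, XSDFEC_SET_BYPASS, &bypass) < 0)
+        perror("XSDFEC_SET_BYPASS");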
+
+Start the SD-FEC core
+---------------------
+
+Start the SD-FEC core by using the ioctl :c:macro:`XSDFEC_START_DEV`.
+
+Get SD-FEC Status
+-----------------
+
+Get the SD-FEC status of the device by using the ioctl :c:macro:`XSDFEC_GET_STATUS`, which will fill the :c:type:`struct xsdfec_status <xsdfec_status>`.
+
+Monitor for Interrupts
+----------------------
+
+ - Use the poll system call to monitor for an interrupt. The poll system call waits for an interrupt to wake it up or times out if no interrupt occurs.
+ - On return, poll's ``revents`` will indicate whether stats and/or state have been updated
+ - ``POLLPRI`` indicates a critical error and the user should use :c:macro:`XSDFEC_GET_STATUS` and :c:macro:`XSDFEC_GET_STATS` to confirm
+ - ``POLLRDNORM`` indicates a non-critical error has occurred and the user should use :c:macro:`XSDFEC_GET_STATS` to confirm
+ - Get stats by using the ioctl :c:macro:`XSDFEC_GET_STATS`
+ - For a critical error, the ``isr_err_count`` or ``uecc_count`` member of :c:type:`struct xsdfec_stats <xsdfec_stats>` is non-zero
+ - For non-critical errors the ``cecc_count`` member of :c:type:`struct xsdfec_stats <xsdfec_stats>` is non-zero
+ - Get state by using the ioctl :c:macro:`XSDFEC_GET_STATUS`
+ - For a critical error the ``state`` of :c:type:`xsdfec_status <xsdfec_status>` will indicate a Reset Is Required
+ - Clear stats by using the ioctl :c:macro:`XSDFEC_CLEAR_STATS`
+
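+The steps above can be combined into a sketch such as the following,
+continuing the earlier example (``<poll.h>`` is additionally required):
+
+.. code-block:: c
+
+    struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLRDNORM };
+    struct xsdfec_stats stats;
+    struct xsdfec_status status;
+
+    if (poll(&pfd, 1, -1) > 0) {
+        if (ioctl(fd, XSDFEC_GET_STATS, &stats) == 0) {
+            if (stats.isr_err_count || stats.uecc_count) {
+                /* critical error */
+            } else if (stats.cecc_count) {
+                /* non-critical (correctable) error */
+            }
+        }
+        if (pfd.revents & POLLPRI) {
+            /* status.state should now report a reset is required */
+            ioctl(fd, XSDFEC_GET_STATUS, &status);
+        }
+        ioctl(fd, XSDFEC_CLEAR_STATS);
+    }
+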
+If a critical error requiring a reset is detected, the application is required to call the ioctl :c:macro:`XSDFEC_SET_DEFAULT_CONFIG` after the reset; it is not required to call the ioctl :c:macro:`XSDFEC_STOP_DEV`.
+
+Note: Using the poll system call avoids busy looping with :c:macro:`XSDFEC_GET_STATS` and :c:macro:`XSDFEC_GET_STATUS`.
+
+Stop the SD-FEC Core
+---------------------
+
+Stop the device by using the ioctl :c:macro:`XSDFEC_STOP_DEV`.
+
+Set the Default Configuration
+-----------------------------
+
+Load the default configuration by using the ioctl :c:macro:`XSDFEC_SET_DEFAULT_CONFIG` to restore the driver.
+
+Driver IOCTLs
+==============
+
+.. c:macro:: XSDFEC_START_DEV
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_START_DEV
+
+.. c:macro:: XSDFEC_STOP_DEV
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_STOP_DEV
+
+.. c:macro:: XSDFEC_GET_STATUS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_STATUS
+
+.. c:macro:: XSDFEC_SET_IRQ
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_IRQ
+
+.. c:macro:: XSDFEC_SET_TURBO
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_TURBO
+
+.. c:macro:: XSDFEC_ADD_LDPC_CODE_PARAMS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_ADD_LDPC_CODE_PARAMS
+
+.. c:macro:: XSDFEC_GET_CONFIG
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_CONFIG
+
+.. c:macro:: XSDFEC_SET_ORDER
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_ORDER
+
+.. c:macro:: XSDFEC_SET_BYPASS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_BYPASS
+
+.. c:macro:: XSDFEC_IS_ACTIVE
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_IS_ACTIVE
+
+.. c:macro:: XSDFEC_CLEAR_STATS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_CLEAR_STATS
+
+.. c:macro:: XSDFEC_GET_STATS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_STATS
+
+.. c:macro:: XSDFEC_SET_DEFAULT_CONFIG
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_DEFAULT_CONFIG
+
+Driver Type Definitions
+=======================
+
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :internal: \ No newline at end of file
diff --git a/Documentation/misc-devices/xilinx_trafgen.txt b/Documentation/misc-devices/xilinx_trafgen.txt
new file mode 100644
index 000000000000..473abc9628f6
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_trafgen.txt
@@ -0,0 +1,87 @@
+Kernel driver xilinx_trafgen
+============================
+
+Supported chips:
+Zynq SoC, Xilinx 7 series FPGAs (Virtex, Kintex, Artix)
+
+Data Sheet:
+ http://www.xilinx.com/support/documentation/ip_documentation/axi_traffic_gen/v2_0/pg125-axi-traffic-gen.pdf
+
+Author:
+ Kedareswara Rao Appana <appanad@xilinx.com>
+
+Description
+-----------
+
+AXI Traffic Generator IP is a core that stresses the AXI4 interconnect and other
+AXI4 peripherals in the system. It generates a wide variety of AXI4 transactions
+based on the core programming.
+
+Features:
+---> Configurable option to generate and accept data according to different
+traffic profiles.
+---> Supports dependent/independent transactions between the read/write
+master ports with configurable delays.
+---> Programmable repeat count for each transaction with
+constant/increment/random address.
+---> External start/stop to generate traffic without processor intervention.
+---> Generates IP-specific traffic on AXI interface for pre-defined protocols.
+
+SYSFS:
+
+id
+ RO - shows the trafgen id.
+
+resource
+ RO - shows the base address of the trafgen.
+
+master_start_stop
+ RW - monitors the master start logic.
+
+config_slave_status
+ RW - configures and monitors the slave status.
+
+err_sts
+ RW - gets the error statistics / clears the errors.
+
+err_en
+ WO - enables the errors.
+
+intr_en
+ WO - enables the interrupts.
+
+last_valid_index
+ RW - gets the last valid index value.
+
+config_sts_show
+ RO - shows the config status value.
+
+mram_clear
+ WO - clears the master RAM.
+
+cram_clear
+ WO - clears the command RAM.
+
+pram_clear
+ WO - clears the parameter RAM.
+
+static_enable
+ RO - enables the static mode.
+
+static_burst_len
+ RW - gets/sets the static burst length.
+
+static_transferdone
+ RW - monitors the static transfer done status.
+
+reset_static_transferdone
+ RO - resets the static transferdone bit.
+
+stream_enable
+ RO - enables the streaming mode.
+
+stream_transferlen
+ RW - gets/sets the streaming mode transfer length.
+
+stream_transfercnt
+ RW - gets/sets the streaming mode transfer count.
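+
+For illustration, these attributes can be read like any other sysfs file;
+the device path below is an assumption, so locate the real trafgen node
+under /sys/bus/platform/devices/ first:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            /* Assumed example path for a trafgen instance */
+            FILE *f = fopen("/sys/bus/platform/devices/ff000000.trafgen/id",
+                            "r");
+            char buf[32];
+
+            if (f && fgets(buf, sizeof(buf), f))
+                    printf("trafgen id: %s", buf);
+            if (f)
+                    fclose(f);
+            return 0;
+    }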
diff --git a/MAINTAINERS b/MAINTAINERS
index f5e7745c6d4b..80b9fe29d752 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2715,8 +2715,11 @@ F: include/uapi/linux/atm*
ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+L: git@xilinx.com
S: Supported
F: drivers/net/ethernet/cadence/
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841740/Macb+Driver
ATMEL MAXTOUCH DRIVER
M: Nick Dyer <nick@shmanahar.org>
@@ -3536,6 +3539,13 @@ S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/
+CADENCE I2C DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Venkata Visweswarachari Mallavarapu <vmallava@xilinx.com>
+L: git@xilinx.com
+F: drivers/i2c/busses/i2c-cadence.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842160/Cadence+I2C+Driver
+
CADENCE MIPI-CSI2 BRIDGES
M: Maxime Ripard <maxime.ripard@bootlin.com>
L: linux-media@vger.kernel.org
@@ -3826,6 +3836,7 @@ F: Documentation/translations/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@nxp.com>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
L: linux-usb@vger.kernel.org
S: Maintained
@@ -4639,6 +4650,7 @@ F: drivers/usb/dwc2/
DESIGNWARE USB3 DRD IP DRIVER
M: Felipe Balbi <balbi@kernel.org>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
@@ -5439,6 +5451,15 @@ F: drivers/gpu/drm/etnaviv/
F: include/uapi/drm/etnaviv_drm.h
F: Documentation/devicetree/bindings/display/etnaviv/
+DRM DRIVERS FOR XILINX
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/xlnx/
+F: Documentation/devicetree/bindings/display/xlnx/
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR ZTE ZX
M: Shawn Guo <shawnguo@kernel.org>
L: dri-devel@lists.freedesktop.org
@@ -8953,6 +8974,12 @@ F: drivers/ata/pata_ftide010.c
F: drivers/ata/sata_gemini.c
F: drivers/ata/sata_gemini.h
+LIBATA SATA AHCI CEVA CONTROLLER DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/ata/ahci_ceva.c
+F: Documentation/devicetree/bindings/ata/ahci-ceva.txt
+
LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Jens Axboe <axboe@kernel.dk>
@@ -16561,6 +16588,7 @@ F: drivers/net/wireless/rndis_wlan.c
USB XHCI DRIVER
M: Mathias Nyman <mathias.nyman@intel.com>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
S: Supported
F: drivers/usb/host/xhci*
@@ -16618,6 +16646,11 @@ F: Documentation/driver-api/uio-howto.rst
F: drivers/uio/
F: include/linux/uio_driver.h
+USERSPACE I/O (UIO) DRIVER FOR XILINX AI ENGINE NPI
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
+
UTIL-LINUX PACKAGE
M: Karel Zak <kzak@redhat.com>
L: util-linux@vger.kernel.org
@@ -17392,11 +17425,196 @@ S: Supported
F: Documentation/filesystems/xfs.txt
F: fs/xfs/
+XILINX AUDIO FORMATTER (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+F: sound/soc/xilinx/xlnx_formatter_pcm.c
+
+XILINX AXI DMAENGINE DRIVER
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/dma/xilinx/xilinx_dma.c
+F: Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+
XILINX AXI ETHERNET DRIVER
M: Anirudha Sarangi <anirudh@xilinx.com>
M: John Linn <John.Linn@xilinx.com>
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
+F: Documentation/devicetree/bindings/net/xilinx_axienet.txt
+
+XILINX AXI SPI DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+S: Maintained
+F: drivers/spi/spi-xilinx.c
+F: Documentation/devicetree/bindings/spi/spi-xilinx.txt
+
+XILINX AXI USB DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/usb/gadget/udc/udc-xilinx.c
+F: Documentation/devicetree/bindings/usb/udc-xilinx.txt
+
+XILINX AXI PERFORMANCE MONITOR DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/uio/uio_xilinx_apm.c
+F: Documentation/devicetree/bindings/uio/xilinx_apm.txt
+
+XILINX CAN DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/net/can/xilinx_can.c
+F: Documentation/devicetree/bindings/net/can/xilinx_can.txt
+
+XILINX DMA FRAMEBUFFER READ/WRITE DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
+F: drivers/dma/xilinx/xilinx_frmbuf.c
+F: include/linux/dma/xilinx_frmbuf.h
+
+XILINX EMACLITE DRIVER
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/net/ethernet/xilinx/xilinx_emaclite.c
+F: Documentation/devicetree/bindings/net/xilinx_emaclite.txt
+
+XILINX GMII2RGMII DRIVER
+M: Harini Katakam <harini.katakam@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/net/phy/xilinx_gmii2rgmii.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842246/Xilinx+GMII2RGMII+convertor
+
+XILINX GQSPI ZYNQMP SPI DRIVER
+M: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Amit Kumar Mahapatra <amit.kumar-mahapatra@xilinx.com>
+F: drivers/spi/spi-zynqmp-gqspi.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841754/Zynqmp+QSPI+Driver
+
+XILINX I2S AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+F: sound/soc/xilinx/xlnx_i2s.c
+
+XILINX MEDIA AXIS SWITCH DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
+F: drivers/media/platform/xilinx/xilinx-axis-switch.c
+
+XILINX MEDIA CSI2RXSS DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
+F: drivers/media/platform/xilinx/xilinx-csi2rxss.c
+F: include/uapi/linux/xilinx-csi2rxss.h
+
+XILINX MEDIA DEMOSAIC DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
+F: drivers/media/platform/xilinx/xilinx-demosaic.c
+
+XILINX MEDIA GAMMA DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
+F: drivers/media/platform/xilinx/xilinx-gamma.c
+F: drivers/media/platform/xilinx/xilinx-gamma-coeff.h
+
+XILINX MEDIA SDI RX DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
+F: drivers/media/platform/xilinx/xilinx-sdirxss.c
+F: include/uapi/linux/xilinx-sdirxss.h
+
+XILINX MEDIA VPSS COLOR SPACE CONVERTER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
+F: drivers/media/platform/xilinx/xilinx-vpss-csc.c
+
+XILINX MEDIA VPSS SCALER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
+F: drivers/media/platform/xilinx/xilinx-vpss-scaler.c
+
+XILINX PL SOUND CARD (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: sound/soc/xilinx/xlnx_pl_snd_card.c
+
+XILINX QSPI ZYNQ SPI DRIVER
+M: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Amit Kumar Mahapatra <amit.kumar-mahapatra@xilinx.com>
+F: drivers/spi/spi-zynq-qspi.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842262/Zynq+QSPI+Driver
+
+XILINX SD-FEC IP CORES
+M: Derek Kiernan <derek.kiernan@xilinx.com>
+M: Dragan Cvetic <dragan.cvetic@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt
+F: Documentation/misc-devices/xilinx_sdfec.rst
+F: drivers/misc/xilinx_sdfec.c
+F: drivers/misc/Kconfig
+F: drivers/misc/Makefile
+F: include/uapi/misc/xilinx_sdfec.h
+
+XILINX SDI AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
+F: sound/soc/xilinx/xlnx_sdi_audio.c
+
+XILINX SPDIF AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+F: sound/soc/xilinx/xlnx_spdif.c
XILINX UARTLITE SERIAL DRIVER
M: Peter Korsgaard <jacmet@sunsite.dk>
@@ -17404,6 +17622,12 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/uartlite.c
+XILINX UARTPS DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/tty/serial/xilinx_uartps.c
+F: Documentation/devicetree/bindings/serial/cdns,uart.txt
+
XILINX VIDEO IP CORES
M: Hyun Kwon <hyun.kwon@xilinx.com>
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@ -17414,6 +17638,71 @@ F: Documentation/devicetree/bindings/media/xilinx/
F: drivers/media/platform/xilinx/
F: include/uapi/linux/xilinx-v4l2-controls.h
+XILINX ZYNQ FPGA MANAGER DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/fpga/zynq-fpga.c
+F: Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
+
+XILINX ZYNQMP AES DRIVER
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-aes.c
+F: Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
+
+XILINX ZYNQMP DMAENGINE DRIVER
+M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/dma/xilinx/zynqmp_dma.c
+F: Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
+
+XILINX ZYNQMP FPGA MANAGER DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/fpga/zynqmp-fpga.c
+F: Documentation/devicetree/bindings/fpga/xilinx-zynqmp-fpga-mgr.txt
+
+ZYNQMP IPI MAILBOX CONTROLLER DRIVER
+M: Wendy Liang <wendy.liang@xilinx.com>
+S: Maintained
+F: drivers/mailbox/zynqmp-ipi-mailbox.c
+F: include/linux/mailbox/zynqmp-ipi-message.h
+F: Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
+
+XILINX ZYNQMP PHY DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/phy/phy-zynqmp.c
+F: Documentation/devicetree/bindings/phy/phy-zynqmp.txt
+
+XILINX ZYNQMP R5 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
+M: Wendy Liang <wendy.liang@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
+F: drivers/remoteproc/zynqmp_r5_remoteproc.c
+
+XILINX ZYNQMP RSA DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-rsa.c
+F: Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
+
+XILINX ZYNQMP SHA DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-sha.c
+F: Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
+
XILLYBUS DRIVER
M: Eli Billauer <eli.billauer@gmail.com>
L: linux-kernel@vger.kernel.org
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index ca6425ad794c..5602f4f3ad1c 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
*/
/ {
@@ -60,6 +60,7 @@
};
amba: amba {
+ u-boot,dm-pre-reloc;
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -155,6 +156,13 @@
reg = <0xf8006000 0x1000>;
};
+ ocmc: ocmc@f800c000 {
+ compatible = "xlnx,zynq-ocmc-1.0";
+ interrupt-parent = <&intc>;
+ interrupts = <0 3 4>;
+ reg = <0xf800c000 0x1000>;
+ };
+
uart0: serial@e0000000 {
compatible = "xlnx,xuartps", "cdns,uart-r1p8";
status = "disabled";
@@ -197,6 +205,45 @@
#size-cells = <0>;
};
+ qspi: spi@e000d000 {
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 10>, <&clkc 43>;
+ compatible = "xlnx,zynq-qspi-1.0";
+ status = "disabled";
+ interrupt-parent = <&intc>;
+ interrupts = <0 19 4>;
+ reg = <0xe000d000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ smcc: memory-controller@e000e000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ clock-names = "memclk", "apb_pclk";
+ clocks = <&clkc 11>, <&clkc 44>;
+ compatible = "arm,pl353-smc-r2p1", "arm,primecell";
+ interrupt-parent = <&intc>;
+ interrupts = <0 18 4>;
+ ranges ;
+ reg = <0xe000e000 0x1000>;
+ nand0: flash@e1000000 {
+ status = "disabled";
+ compatible = "arm,pl353-nand-r2p1";
+ reg = <0xe1000000 0x1000000>;
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ };
+ nor0: flash@e2000000 {
+ status = "disabled";
+ compatible = "cfi-flash";
+ reg = <0xe2000000 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
gem0: ethernet@e000b000 {
compatible = "cdns,zynq-gem", "cdns,gem";
reg = <0xe000b000 0x1000>;
@@ -240,15 +287,17 @@
};
slcr: slcr@f8000000 {
+ u-boot,dm-pre-reloc;
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,zynq-slcr", "syscon", "simple-mfd";
reg = <0xF8000000 0x1000>;
ranges;
clkc: clkc@100 {
+ u-boot,dm-pre-reloc;
#clock-cells = <1>;
compatible = "xlnx,ps7-clkc";
- fclk-enable = <0>;
+ fclk-enable = <0xf>;
clock-output-names = "armpll", "ddrpll", "iopll", "cpu_6or4x",
"cpu_3or2x", "cpu_2x", "cpu_1x", "ddr2x", "ddr3x",
"dci", "lqspi", "smc", "pcap", "gem0", "gem1",
@@ -297,14 +346,19 @@
devcfg: devcfg@f8007000 {
compatible = "xlnx,zynq-devcfg-1.0";
- reg = <0xf8007000 0x100>;
interrupt-parent = <&intc>;
interrupts = <0 8 4>;
- clocks = <&clkc 12>;
- clock-names = "ref_clk";
+ reg = <0xf8007000 0x100>;
+ clocks = <&clkc 12>, <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ clock-names = "ref_clk", "fclk0", "fclk1", "fclk2", "fclk3";
syscon = <&slcr>;
};
+ efuse: efuse@f800d000 {
+ compatible = "xlnx,zynq-efuse";
+ reg = <0xf800d000 0x20>;
+ };
+
global_timer: timer@f8f00200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xf8f00200 0x20>;
@@ -365,5 +419,160 @@
reg = <0xf8005000 0x1000>;
timeout-sec = <10>;
};
+
+ etb@f8801000 {
+ compatible = "arm,coresight-etb10", "arm,primecell";
+ reg = <0xf8801000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ port {
+ etb_in_port: endpoint {
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+ tpiu@f8803000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0xf8803000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ port {
+ tpiu_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port0>;
+ };
+ };
+ };
+
+ funnel@0,f8804000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0xf8804000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ /* funnel output ports */
+ out-ports {
+ port {
+ funnel_out_port: endpoint {
+ remote-endpoint =
+ <&replicator_in_port0>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel input ports */
+ port@0 {
+ reg = <0>;
+ funnel0_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm0_out_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel0_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&ptm1_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ funnel0_in_port2: endpoint {
+ slave-mode;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ funnel0_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&itm_out_port>;
+ };
+ };
+ /* The other input ports are not connected to anything */
+ };
+ };
+
+ replicator {
+ compatible = "arm,coresight-replicator";
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ out-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etb_in_port>;
+ };
+ };
+ };
+ in-ports {
+ /* replicator input port */
+ port {
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out_port>;
+ };
+ };
+ };
+ };
+
+ itm@0,f8805000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0xf8805000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+
+ port {
+ itm_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port3>;
+ };
+ };
+ };
+
+ ptm@0,f889c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0xf889c000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ cpu = <&cpu0>;
+ port {
+ ptm0_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port0>;
+ };
+ };
+ };
+
+ ptm@0,f889d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0xf889d000 0x1000>;
+ clocks = <&clkc 27>, <&clkc 46>, <&clkc 47>;
+ clock-names = "apb_pclk", "dbg_trc", "dbg_apb";
+ cpu = <&cpu1>;
+ port {
+ ptm1_out_port: endpoint {
+ remote-endpoint = <&funnel0_in_port1>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/zynq-cc108.dts b/arch/arm/boot/dts/zynq-cc108.dts
index 8b9ab9bba23b..64d73ecbc592 100644
--- a/arch/arm/boot/dts/zynq-cc108.dts
+++ b/arch/arm/boot/dts/zynq-cc108.dts
@@ -18,6 +18,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart0;
+ spi0 = &qspi;
};
chosen {
@@ -52,6 +53,45 @@
};
};
+&qspi {
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 { /* 16 MB */
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot-bs";
+ reg = <0x0 0x400000>; /* 4MB */
+ };
+ partition@400000 {
+ label = "qspi-linux";
+ reg = <0x400000 0x400000>; /* 4MB */
+ };
+ partition@800000 {
+ label = "qspi-rootfs";
+ reg = <0x800000 0x400000>; /* 4MB */
+ };
+ partition@c00000 {
+ label = "qspi-devicetree";
+ reg = <0xc00000 0x100000>; /* 1MB */
+ };
+ partition@d00000 {
+ label = "qspi-scratch";
+ reg = <0xd00000 0x200000>; /* 2MB */
+ };
+ partition@f00000 {
+ label = "qspi-uboot-env";
+ reg = <0xf00000 0x100000>; /* 1MB */
+ };
+ };
+};
+
&sdhci1 {
status = "okay";
broken-cd ;
@@ -59,6 +99,7 @@
};
&uart0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
index 27cd6cb52f1b..c9940fb366ce 100644
--- a/arch/arm/boot/dts/zynq-zc702.dts
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -14,7 +14,9 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
+ usb0 = &usb0;
};
memory@0 {
@@ -56,9 +58,12 @@
};
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -85,6 +90,8 @@
phy-handle = <&ethernet_phy>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gem0_default>;
+ phy-reset-gpio = <&gpio0 11 0>;
+ phy-reset-active-low;
ethernet_phy: ethernet-phy@7 {
reg = <7>;
@@ -100,8 +107,11 @@
&i2c0 {
status = "okay";
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio0 50 0>;
+ sda-gpios = <&gpio0 51 0>;
i2c-mux@74 {
compatible = "nxp,pca9548";
@@ -292,6 +302,19 @@
};
};
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_50_grp", "gpio0_51_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_50_grp", "gpio0_51_grp";
+ slew-rate = <0>;
+ io-standard = <1>;
+ };
+ };
+
pinctrl_sdhci0_default: sdhci0-default {
mux {
groups = "sdio0_2_grp";
@@ -380,13 +403,51 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
diff --git a/arch/arm/boot/dts/zynq-zc706.dts b/arch/arm/boot/dts/zynq-zc706.dts
index 77943c16d33f..1a1b03a4223d 100644
--- a/arch/arm/boot/dts/zynq-zc706.dts
+++ b/arch/arm/boot/dts/zynq-zc706.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -14,6 +14,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -27,9 +28,12 @@
stdout-path = "serial0:115200n8";
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -303,13 +307,51 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <1>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
@@ -322,3 +364,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0_default>;
};
+
+&watchdog0 {
+ reset-on-timeout;
+};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm010.dts b/arch/arm/boot/dts/zynq-zc770-xm010.dts
index 0dd352289a45..8596b4ce8c91 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm010.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm010.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
spi1 = &spi1;
};
@@ -57,7 +58,41 @@
compatible = "atmel,24c02";
reg = <0x52>;
};
+};
+&qspi {
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
};
&sdhci0 {
@@ -68,7 +103,7 @@
status = "okay";
num-cs = <4>;
is-decoded-cs = <0>;
- flash@1 {
+ flash@0 {
compatible = "sst25wf080", "jedec,spi-nor";
reg = <1>;
spi-max-frequency = <1000000>;
@@ -85,6 +120,7 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm011.dts b/arch/arm/boot/dts/zynq-zc770-xm011.dts
index b7f65862c022..142e7263a177 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm011.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm011.dts
@@ -47,6 +47,47 @@
};
};
+&nand0 {
+ status = "okay";
+ arm,nand-cycle-t0 = <0x4>;
+ arm,nand-cycle-t1 = <0x4>;
+ arm,nand-cycle-t2 = <0x1>;
+ arm,nand-cycle-t3 = <0x2>;
+ arm,nand-cycle-t4 = <0x2>;
+ arm,nand-cycle-t5 = <0x2>;
+ arm,nand-cycle-t6 = <0x4>;
+
+ partition@nand-fsbl-uboot {
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@nand-linux {
+ label = "nand-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@nand-device-tree {
+ label = "nand-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@nand-rootfs {
+ label = "nand-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@nand-bitstream {
+ label = "nand-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+};
+
+&smcc {
+ status = "okay";
+ arm,addr25 = <0x0>;
+ arm,nor-chip-sel0 = <0x0>;
+ arm,nor-chip-sel1 = <0x0>;
+ arm,sram-chip-sel0 = <0x0>;
+ arm,sram-chip-sel1 = <0x0>;
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -54,6 +95,7 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm012.dts b/arch/arm/boot/dts/zynq-zc770-xm012.dts
index d2359b789eb8..e0e5980200cb 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm012.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm012.dts
@@ -53,6 +53,47 @@
};
};
+&nor0 {
+ status = "okay";
+ bank-width = <1>;
+ xlnx,sram-cycle-t0 = <0xb>;
+ xlnx,sram-cycle-t1 = <0xb>;
+ xlnx,sram-cycle-t2 = <0x4>;
+ xlnx,sram-cycle-t3 = <0x4>;
+ xlnx,sram-cycle-t4 = <0x3>;
+ xlnx,sram-cycle-t5 = <0x3>;
+ xlnx,sram-cycle-t6 = <0x2>;
+ partition@nor-fsbl-uboot {
+ label = "nor-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@nor-linux {
+ label = "nor-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@nor-device-tree {
+ label = "nor-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@nor-rootfs {
+ label = "nor-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@nor-bitstream {
+ label = "nor-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+};
+
+&smcc {
+ status = "okay";
+ arm,addr25 = <0x1>;
+ arm,nor-chip-sel0 = <0x1>;
+ arm,nor-chip-sel1 = <0x0>;
+ arm,sram-chip-sel0 = <0x0>;
+ arm,sram-chip-sel1 = <0x0>;
+};
+
&spi1 {
status = "okay";
num-cs = <4>;
@@ -60,5 +101,6 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm013.dts b/arch/arm/boot/dts/zynq-zc770-xm013.dts
index 4ae2c85df3a0..d91330aab9b9 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm013.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm013.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem1;
i2c0 = &i2c1;
serial0 = &uart0;
+ spi0 = &qspi;
spi1 = &spi0;
};
@@ -58,11 +59,46 @@
};
};
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
is-decoded-cs = <0>;
- eeprom: eeprom@2 {
+ eeprom: eeprom@0 {
at25,byte-len = <8192>;
at25,addr-mode = <2>;
at25,page-size = <32>;
@@ -74,5 +110,6 @@
};
&uart0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zed.dts b/arch/arm/boot/dts/zynq-zed.dts
index 6a5a93aa6552..849240fbd076 100644
--- a/arch/arm/boot/dts/zynq-zed.dts
+++ b/arch/arm/boot/dts/zynq-zed.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -26,9 +27,12 @@
stdout-path = "serial0:115200n8";
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -47,11 +51,50 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "spansion,s25fl256s", "spi-flash";
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@qspi-fsbl-uboot {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@qspi-bitstream {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zturn.dts b/arch/arm/boot/dts/zynq-zturn.dts
index 5ec616ebca08..b38704657960 100644
--- a/arch/arm/boot/dts/zynq-zturn.dts
+++ b/arch/arm/boot/dts/zynq-zturn.dts
@@ -54,7 +54,7 @@
label = "K1";
gpios = <&gpio0 0x32 0x1>;
linux,code = <0x66>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
diff --git a/arch/arm/boot/dts/zynq-zybo.dts b/arch/arm/boot/dts/zynq-zybo.dts
index 755f6f109d5a..0ac54ebbdc8b 100644
--- a/arch/arm/boot/dts/zynq-zybo.dts
+++ b/arch/arm/boot/dts/zynq-zybo.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -48,11 +49,18 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/configs/xilinx_zynq_defconfig b/arch/arm/configs/xilinx_zynq_defconfig
new file mode 100644
index 000000000000..031778caf47e
--- /dev/null
+++ b/arch/arm/configs/xilinx_zynq_defconfig
@@ -0,0 +1,242 @@
+CONFIG_LOCALVERSION="-xilinx"
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_BUG is not set
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_ZYNQ=y
+CONFIG_PL310_ERRATA_588369=y
+CONFIG_PL310_ERRATA_727915=y
+CONFIG_PL310_ERRATA_769419=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_ARM_ERRATA_754327=y
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_ARM_ERRATA_775420=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_ZYNQ_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PL353=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_XILINX_TRAFGEN=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_NETDEVICES=y
+CONFIG_MACB=y
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_E1000E=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MARVELL_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQ_QSPI=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_UCD9200=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7604=y
+CONFIG_DRM=y
+CONFIG_DRM_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_ADI=y
+CONFIG_SND_SOC_ADI_AXI_I2S=y
+CONFIG_SND_SOC_ADI_AXI_SPDIF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ZERO=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_DMADEVICES=y
+CONFIG_PL330_DMA=y
+CONFIG_XILINX_DMA_ENGINES=y
+CONFIG_XILINX_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=y
+CONFIG_UIO_XILINX_APM=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQ_REMOTEPROC=m
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_RAS=y
+CONFIG_FPGA=y
+CONFIG_FPGA_MGR_ZYNQ_FPGA=y
+CONFIG_FPGA_MGR_ZYNQ_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 7a88f160b1fb..136a9506f1ed 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -7,7 +7,7 @@
#include <asm/irq.h>
/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI 7
+#define NR_IPI 16
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a91f21e3c5b5..bbdfd74ff98a 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -120,4 +120,7 @@ struct of_cpu_method {
*/
extern void smp_set_ops(const struct smp_operations *);
+extern int set_ipi_handler(int ipinr, void *handler, char *desc);
+extern void clear_ipi_handler(int ipinr);
+
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index a137608cd197..8003ab884f30 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -507,20 +507,59 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
__smp_cross_call = fn;
}
-static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_WAKEUP, "CPU wakeup interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_COMPLETION, "completion interrupts"),
+struct ipi {
+ const char *desc;
+ void (*handler)(void);
+};
+
+static void ipi_cpu_stop(void);
+static void ipi_complete(void);
+
+#define IPI_DESC_STRING_IPI_WAKEUP "CPU wakeup interrupts"
+#define IPI_DESC_STRING_IPI_TIMER "Timer broadcast interrupts"
+#define IPI_DESC_STRING_IPI_RESCHEDULE "Rescheduling interrupts"
+#define IPI_DESC_STRING_IPI_CALL_FUNC "Function call interrupts"
+#define IPI_DESC_STRING_IPI_CPU_STOP "CPU stop interrupts"
+#define IPI_DESC_STRING_IPI_IRQ_WORK "IRQ work interrupts"
+#define IPI_DESC_STRING_IPI_COMPLETION "completion interrupts"
+
+#define IPI_DESC_STR(x) IPI_DESC_STRING_ ## x
+
+static const char* ipi_desc_strings[] __tracepoint_string =
+ {
+ [IPI_WAKEUP] = IPI_DESC_STR(IPI_WAKEUP),
+ [IPI_TIMER] = IPI_DESC_STR(IPI_TIMER),
+ [IPI_RESCHEDULE] = IPI_DESC_STR(IPI_RESCHEDULE),
+ [IPI_CALL_FUNC] = IPI_DESC_STR(IPI_CALL_FUNC),
+ [IPI_CPU_STOP] = IPI_DESC_STR(IPI_CPU_STOP),
+ [IPI_IRQ_WORK] = IPI_DESC_STR(IPI_IRQ_WORK),
+ [IPI_COMPLETION] = IPI_DESC_STR(IPI_COMPLETION),
+ };
+
+
+static void tick_receive_broadcast_local(void)
+{
+ tick_receive_broadcast();
+}
+
+static struct ipi ipi_types[NR_IPI] = {
+#define S(x, f) [x].desc = IPI_DESC_STR(x), [x].handler = f
+ S(IPI_WAKEUP, NULL),
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ S(IPI_TIMER, tick_receive_broadcast_local),
+#endif
+ S(IPI_RESCHEDULE, scheduler_ipi),
+ S(IPI_CALL_FUNC, generic_smp_call_function_interrupt),
+ S(IPI_CPU_STOP, ipi_cpu_stop),
+#ifdef CONFIG_IRQ_WORK
+ S(IPI_IRQ_WORK, irq_work_run),
+#endif
+ S(IPI_COMPLETION, ipi_complete),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ trace_ipi_raise_rcuidle(target, ipi_desc_strings[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -529,13 +568,13 @@ void show_ipi_list(struct seq_file *p, int prec)
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
-
- for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
-
- seq_printf(p, " %s\n", ipi_types[i]);
+ if (ipi_types[i].handler) {
+ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ",
+ __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, " %s\n", ipi_types[i].desc);
+ }
}
}
@@ -585,8 +624,10 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
*/
-static void ipi_cpu_stop(unsigned int cpu)
+static void ipi_cpu_stop(void)
{
+ unsigned int cpu = smp_processor_id();
+
if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
@@ -613,8 +654,10 @@ int register_ipi_completion(struct completion *completion, int cpu)
return IPI_COMPLETION;
}
-static void ipi_complete(unsigned int cpu)
+static void ipi_complete(void)
{
+ unsigned int cpu = smp_processor_id();
+
complete(per_cpu(cpu_completion, cpu));
}
@@ -631,71 +674,48 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ if (ipi_types[ipinr].handler) {
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
-
- switch (ipinr) {
- case IPI_WAKEUP:
- break;
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- case IPI_TIMER:
- irq_enter();
- tick_receive_broadcast();
- irq_exit();
- break;
-#endif
-
- case IPI_RESCHEDULE:
- scheduler_ipi();
- break;
-
- case IPI_CALL_FUNC:
irq_enter();
- generic_smp_call_function_interrupt();
+ (*ipi_types[ipinr].handler)();
irq_exit();
- break;
+ } else
+ pr_debug("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
- case IPI_CPU_STOP:
- irq_enter();
- ipi_cpu_stop(cpu);
- irq_exit();
- break;
+ set_irq_regs(old_regs);
+}
-#ifdef CONFIG_IRQ_WORK
- case IPI_IRQ_WORK:
- irq_enter();
- irq_work_run();
- irq_exit();
- break;
-#endif
+/*
+ * set_ipi_handler:
+ * Interface provided for a kernel module to specify an IPI handler function.
+ */
+int set_ipi_handler(int ipinr, void *handler, char *desc)
+{
+ unsigned int cpu = smp_processor_id();
- case IPI_COMPLETION:
- irq_enter();
- ipi_complete(cpu);
- irq_exit();
- break;
+ if (ipi_types[ipinr].handler) {
+ pr_crit("CPU%u: IPI handler 0x%x already registered to %pf\n",
+ cpu, ipinr, ipi_types[ipinr].handler);
+ return -1;
+ }
- case IPI_CPU_BACKTRACE:
- printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
- printk_nmi_exit();
- break;
+ ipi_types[ipinr].handler = handler;
+ ipi_types[ipinr].desc = desc;
- default:
- pr_crit("CPU%u: Unknown IPI message 0x%x\n",
- cpu, ipinr);
- break;
- }
+ return 0;
+}
+EXPORT_SYMBOL(set_ipi_handler);
- if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
- set_irq_regs(old_regs);
+/*
+ * clear_ipi_handler:
+ * Interface provided for a kernel module to clear an IPI handler function.
+ */
+void clear_ipi_handler(int ipinr)
+{
+ ipi_types[ipinr].handler = NULL;
+ ipi_types[ipinr].desc = NULL;
}
+EXPORT_SYMBOL(clear_ipi_handler);
void smp_send_reschedule(int cpu)
{
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 1ca633e3d024..557bfe794d29 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -17,3 +17,19 @@ config ARCH_ZYNQ
select SOC_BUS
help
Support for Xilinx Zynq ARM Cortex A9 Platform
+
+if ARCH_ZYNQ
+
+menu "Xilinx Specific Options"
+
+config XILINX_PREFETCH
+ bool "Cache Prefetch"
+ default y
+ help
+ This option turns on L1 & L2 cache prefetching to get the best performance
+ in many cases. This may not always be the best performance depending on
+ the usage.
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-zynq/Makefile b/arch/arm/mach-zynq/Makefile
index 9df74cd85fd0..dbb75be53deb 100644
--- a/arch/arm/mach-zynq/Makefile
+++ b/arch/arm/mach-zynq/Makefile
@@ -4,5 +4,9 @@
#
# Common support
-obj-y := common.o slcr.o pm.o
+obj-y := common.o efuse.o slcr.o zynq_ocm.o pm.o
+
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+ORIG_AFLAGS := $(KBUILD_AFLAGS)
+KBUILD_AFLAGS = $(subst -march=armv6k,,$(ORIG_AFLAGS))
+obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 3a4248fd7962..0a86e6fc8fb6 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -95,6 +95,7 @@ static void __init zynq_init_late(void)
{
zynq_core_pm_init();
zynq_pm_late_init();
+ zynq_prefetch_init();
}
/**
@@ -175,6 +176,7 @@ static void __init zynq_map_io(void)
static void __init zynq_irq_init(void)
{
+ zynq_early_efuse_init();
zynq_early_slcr_init();
irqchip_init();
}
@@ -186,8 +188,13 @@ static const char * const zynq_dt_match[] = {
DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
/* 64KB way size, 8-way associativity, parity disabled */
- .l2c_aux_val = 0x00400000,
+#ifdef CONFIG_XILINX_PREFETCH
+ .l2c_aux_val = 0x30400000,
+ .l2c_aux_mask = 0xcfbfffff,
+#else
+ .l2c_aux_val = 0x00400000,
.l2c_aux_mask = 0xffbfffff,
+#endif
.smp = smp_ops(zynq_smp_ops),
.map_io = zynq_map_io,
.init_irq = zynq_irq_init,
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index 60e662324699..5816d57e5a5d 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -15,8 +15,12 @@ extern void zynq_slcr_cpu_stop(int cpu);
extern void zynq_slcr_cpu_start(int cpu);
extern bool zynq_slcr_cpu_state_read(int cpu);
extern void zynq_slcr_cpu_state_write(int cpu, bool die);
+extern u32 zynq_slcr_get_ocm_config(void);
extern u32 zynq_slcr_get_device_id(void);
+extern bool zynq_efuse_cpu_state(int cpu);
+extern int zynq_early_efuse_init(void);
+
#ifdef CONFIG_SMP
extern char zynq_secondary_trampoline;
extern char zynq_secondary_trampoline_jump;
@@ -25,9 +29,31 @@ extern int zynq_cpun_start(u32 address, int cpu);
extern const struct smp_operations zynq_smp_ops;
#endif
+extern void zynq_slcr_init_preload_fpga(void);
+extern void zynq_slcr_init_postload_fpga(void);
+
+extern void __iomem *zynq_slcr_base;
extern void __iomem *zynq_scu_base;
void zynq_pm_late_init(void);
+extern unsigned int zynq_sys_suspend_sz;
+int zynq_sys_suspend(void __iomem *ddrc_base, void __iomem *slcr_base);
+
+static inline void zynq_prefetch_init(void)
+{
+ /*
+ * Enable prefetching in aux control register. L2 prefetch must
+ * only be enabled if the slave supports it (PL310 does)
+ */
+ asm volatile ("mrc p15, 0, r1, c1, c0, 1\n"
+#ifdef CONFIG_XILINX_PREFETCH
+ "orr r1, r1, #6\n"
+#else
+ "bic r1, r1, #6\n"
+#endif
+ "mcr p15, 0, r1, c1, c0, 1\n"
+ : : : "r1");
+}
static inline void zynq_core_pm_init(void)
{
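On the Cortex-A9 the "#6" mask in zynq_prefetch_init() covers ACTLR bit 1 (L2 prefetch hint enable) and bit 2 (L1 prefetch enable). A minimal sketch for reading the register back, e.g. to verify that the setting took effect (the helper name is hypothetical, not part of this patch):

	static inline u32 zynq_read_actlr(void)
	{
		u32 actlr;

		/* read the Auxiliary Control Register (CP15 c1, c0, 1) */
		asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
		return actlr;	/* bit 1: L2 prefetch, bit 2: L1 prefetch */
	}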
diff --git a/arch/arm/mach-zynq/efuse.c b/arch/arm/mach-zynq/efuse.c
new file mode 100644
index 000000000000..d31a5822ec65
--- /dev/null
+++ b/arch/arm/mach-zynq/efuse.c
@@ -0,0 +1,75 @@
+/*
+ * Xilinx EFUSE driver
+ *
+ * Copyright (c) 2016 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include "common.h"
+
+#define EFUSE_STATUS_OFFSET 0x10
+
+/* 0 means cpu1 is working, 1 means cpu1 is broken */
+#define EFUSE_STATUS_CPU_BIT BIT(7)
+
+void __iomem *zynq_efuse_base;
+
+/**
+ * zynq_efuse_cpu_state - Read cpu state
+ * @cpu: cpu number
+ *
+ * Return: true if cpu is running, false if cpu is broken
+ */
+bool zynq_efuse_cpu_state(int cpu)
+{
+ u32 state;
+
+ if (!cpu)
+ return true;
+
+ state = readl(zynq_efuse_base + EFUSE_STATUS_OFFSET);
+ state &= EFUSE_STATUS_CPU_BIT;
+
+ if (!state)
+ return true;
+
+ return false;
+}
+
+/**
+ * zynq_early_efuse_init - Early efuse init function
+ *
+ * Return: 0 on success, negative errno otherwise.
+ *
+ * Called very early during boot from platform code.
+ */
+int __init zynq_early_efuse_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-efuse");
+ if (!np) {
+ pr_err("%s: no efuse node found\n", __func__);
+ BUG();
+ }
+
+ zynq_efuse_base = of_iomap(np, 0);
+ if (!zynq_efuse_base) {
+ pr_err("%s: Unable to map I/O memory\n", __func__);
+ BUG();
+ }
+
+ np->data = (__force void *)zynq_efuse_base;
+
+ pr_info("%s mapped to %p\n", np->name, zynq_efuse_base);
+
+ of_node_put(np);
+
+ return 0;
+}
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index e65ee8180c35..5d76546fd8b2 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"
@@ -30,6 +31,7 @@ int zynq_cpun_start(u32 address, int cpu)
{
u32 trampoline_code_size = &zynq_secondary_trampoline_end -
&zynq_secondary_trampoline;
+ u32 phy_cpuid = cpu_logical_map(cpu);
/* MS: Expectation that the SLCR is directly mapped and accessible */
/* Not possible to jump to a non-aligned address */
@@ -39,7 +41,7 @@ int zynq_cpun_start(u32 address, int cpu)
u32 trampoline_size = &zynq_secondary_trampoline_jump -
&zynq_secondary_trampoline;
- zynq_slcr_cpu_stop(cpu);
+ zynq_slcr_cpu_stop(phy_cpuid);
if (address) {
if (__pa(PAGE_OFFSET)) {
zero = ioremap(0, trampoline_code_size);
@@ -68,7 +70,7 @@ int zynq_cpun_start(u32 address, int cpu)
if (__pa(PAGE_OFFSET))
iounmap(zero);
}
- zynq_slcr_cpu_start(cpu);
+ zynq_slcr_cpu_start(phy_cpuid);
return 0;
}
@@ -81,6 +83,9 @@ EXPORT_SYMBOL(zynq_cpun_start);
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
+ if (!zynq_efuse_cpu_state(cpu))
+ return -1;
+
return zynq_cpun_start(__pa_symbol(secondary_startup), cpu);
}
@@ -113,6 +118,7 @@ static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
static void zynq_secondary_init(unsigned int cpu)
{
zynq_core_pm_init();
+ zynq_prefetch_init();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/mach-zynq/pm.c b/arch/arm/mach-zynq/pm.c
index 8ba450ab559c..b9445a654b59 100644
--- a/arch/arm/mach-zynq/pm.c
+++ b/arch/arm/mach-zynq/pm.c
@@ -7,6 +7,14 @@
* Sören Brinkmann <soren.brinkmann@xilinx.com>
*/
+#include <linux/clk/zynq.h>
+#include <linux/genalloc.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/map.h>
+#include <asm/suspend.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -22,6 +30,165 @@
static void __iomem *ddrc_base;
+#ifdef CONFIG_SUSPEND
+static int (*zynq_suspend_ptr)(void __iomem *, void __iomem *);
+
+static int zynq_pm_prepare_late(void)
+{
+ return zynq_clk_suspend_early();
+}
+
+static void zynq_pm_wake(void)
+{
+ zynq_clk_resume_late();
+}
+
+static int zynq_pm_suspend(unsigned long arg)
+{
+ u32 reg;
+ int do_ddrpll_bypass = 1;
+
+ /* Topswitch clock stop disable */
+ zynq_clk_topswitch_disable();
+
+ if (!zynq_suspend_ptr || !ddrc_base) {
+ do_ddrpll_bypass = 0;
+ } else {
+ /* enable DDRC self-refresh mode */
+ reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
+ reg |= DDRC_SELFREFRESH_MASK;
+ writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
+ }
+
+ if (do_ddrpll_bypass) {
+ /*
+ * Going this way will turn off the DDR-related clocks and the DDR
+ * PLL, i.e. we might break subsystems relying on any of these
+ * clocks. And even worse: if there are any other masters in the
+ * system (e.g. in the PL) accessing DDR, they are screwed.
+ */
+ flush_cache_all();
+ if (zynq_suspend_ptr(ddrc_base, zynq_slcr_base))
+ pr_warn("DDR self refresh failed.\n");
+ } else {
+ WARN_ONCE(1, "DRAM self-refresh not available\n");
+ cpu_do_idle();
+ }
+
+ /* disable DDRC self-refresh mode */
+ if (do_ddrpll_bypass) {
+ reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
+ reg &= ~DDRC_SELFREFRESH_MASK;
+ writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
+ }
+
+ /* Topswitch clock stop enable */
+ zynq_clk_topswitch_enable();
+
+ return 0;
+}
+
+static int zynq_pm_enter(suspend_state_t suspend_state)
+{
+ switch (suspend_state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ cpu_suspend(0, zynq_pm_suspend);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct platform_suspend_ops zynq_pm_ops = {
+ .prepare_late = zynq_pm_prepare_late,
+ .enter = zynq_pm_enter,
+ .wake = zynq_pm_wake,
+ .valid = suspend_valid_only_mem,
+};
+
+/**
+ * zynq_pm_remap_ocm() - Remap OCM
+ * Returns a pointer to the mapped memory or NULL.
+ *
+ *
+ * Map the OCM so the suspend code can be copied into it.
+static void __iomem *zynq_pm_remap_ocm(void)
+{
+ struct device_node *np;
+ const char *comp = "xlnx,zynq-ocmc-1.0";
+ void __iomem *base = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, comp);
+ if (np) {
+ struct device *dev;
+ unsigned long pool_addr;
+ unsigned long pool_addr_virt;
+ struct gen_pool *pool;
+
+ dev = &(of_find_device_by_node(np)->dev);
+ of_node_put(np);
+
+ /* Get OCM pool from device tree or platform data */
+ pool = gen_pool_get(dev, NULL);
+ if (!pool) {
+ pr_warn("%s: OCM pool is not available\n", __func__);
+ return NULL;
+ }
+
+ pool_addr_virt = gen_pool_alloc(pool, zynq_sys_suspend_sz);
+ if (!pool_addr_virt) {
+ pr_warn("%s: Can't get OCM poll\n", __func__);
+ return NULL;
+ }
+ pool_addr = gen_pool_virt_to_phys(pool, pool_addr_virt);
+ if (!pool_addr) {
+ pr_warn("%s: Can't get physical address of OCM pool\n",
+ __func__);
+ return NULL;
+ }
+ base = __arm_ioremap_exec(pool_addr, zynq_sys_suspend_sz,
+ MT_MEMORY_RWX);
+ if (!base) {
+ pr_warn("%s: IOremap OCM pool failed\n", __func__);
+ return NULL;
+ }
+ pr_debug("%s: Remap OCM %s from %lx to %lx\n", __func__, comp,
+ pool_addr_virt, (unsigned long)base);
+ } else {
+ pr_warn("%s: no compatible node found for '%s'\n", __func__,
+ comp);
+ }
+
+ return base;
+}
+
+static void zynq_pm_suspend_init(void)
+{
+ void __iomem *ocm_base = zynq_pm_remap_ocm();
+
+ if (!ocm_base) {
+ pr_warn("%s: Unable to map OCM.\n", __func__);
+ } else {
+ /*
+ * Copy code to suspend system into OCM. The suspend code
+ * needs to run from OCM as DRAM may no longer be available
+ * when the PLL is stopped.
+ */
+ zynq_suspend_ptr = fncpy((__force void *)ocm_base,
+ (__force void *)&zynq_sys_suspend,
+ zynq_sys_suspend_sz);
+ }
+
+ suspend_set_ops(&zynq_pm_ops);
+}
+#else /* CONFIG_SUSPEND */
+static void zynq_pm_suspend_init(void) { }
+#endif /* CONFIG_SUSPEND */
+
/**
* zynq_pm_ioremap() - Create IO mappings
* @comp: DT compatible string
@@ -68,4 +235,7 @@ void __init zynq_pm_late_init(void)
reg |= DDRC_CLOCKSTOP_MASK;
writel(reg, ddrc_base + DDRC_DRAM_PARAM_REG3_OFFS);
}
+
+ /* set up suspend */
+ zynq_pm_suspend_init();
}
diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c
index 37707614885a..18a36c48db2e 100644
--- a/arch/arm/mach-zynq/slcr.c
+++ b/arch/arm/mach-zynq/slcr.c
@@ -16,10 +16,13 @@
/* register offsets */
#define SLCR_UNLOCK_OFFSET 0x8 /* SCLR unlock register */
#define SLCR_PS_RST_CTRL_OFFSET 0x200 /* PS Software Reset Control */
+#define SLCR_FPGA_RST_CTRL_OFFSET 0x240 /* FPGA Software Reset Control */
#define SLCR_A9_CPU_RST_CTRL_OFFSET 0x244 /* CPU Software Reset Control */
#define SLCR_REBOOT_STATUS_OFFSET 0x258 /* PS Reboot Status */
#define SLCR_PSS_IDCODE 0x530 /* PS IDCODE */
#define SLCR_L2C_RAM 0xA1C /* L2C_RAM in AR#54190 */
+#define SLCR_LVL_SHFTR_EN_OFFSET 0x900 /* Level Shifters Enable */
+#define SLCR_OCM_CFG_OFFSET 0x910 /* OCM Address Mapping */
#define SLCR_UNLOCK_MAGIC 0xDF0D
#define SLCR_A9_CPU_CLKSTOP 0x10
@@ -27,7 +30,7 @@
#define SLCR_PSS_IDCODE_DEVICE_SHIFT 12
#define SLCR_PSS_IDCODE_DEVICE_MASK 0x1F
-static void __iomem *zynq_slcr_base;
+void __iomem *zynq_slcr_base;
static struct regmap *zynq_slcr_regmap;
/**
@@ -116,6 +119,48 @@ static struct notifier_block zynq_slcr_restart_nb = {
};
/**
+ * zynq_slcr_get_ocm_config - Get SLCR OCM config
+ *
+ * return: OCM config bits
+ */
+u32 zynq_slcr_get_ocm_config(void)
+{
+ u32 ret;
+
+ zynq_slcr_read(&ret, SLCR_OCM_CFG_OFFSET);
+ return ret;
+}
+
+/**
+ * zynq_slcr_init_preload_fpga - Disable communication from the PL to PS.
+ */
+void zynq_slcr_init_preload_fpga(void)
+{
+ /* Assert FPGA top level output resets */
+ zynq_slcr_write(0xF, SLCR_FPGA_RST_CTRL_OFFSET);
+
+ /* Disable level shifters */
+ zynq_slcr_write(0, SLCR_LVL_SHFTR_EN_OFFSET);
+
+ /* Enable output level shifters */
+ zynq_slcr_write(0xA, SLCR_LVL_SHFTR_EN_OFFSET);
+}
+EXPORT_SYMBOL(zynq_slcr_init_preload_fpga);
+
+/**
+ * zynq_slcr_init_postload_fpga - Re-enable communication from the PL to PS.
+ */
+void zynq_slcr_init_postload_fpga(void)
+{
+ /* Enable level shifters */
+ zynq_slcr_write(0xf, SLCR_LVL_SHFTR_EN_OFFSET);
+
+ /* Deassert AXI interface resets */
+ zynq_slcr_write(0, SLCR_FPGA_RST_CTRL_OFFSET);
+}
+EXPORT_SYMBOL(zynq_slcr_init_postload_fpga);
+
+/**
* zynq_slcr_cpu_start - Start cpu
* @cpu: cpu number
*/
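A minimal sketch of the intended call pattern around PL reconfiguration; the surrounding programming routine and its helper are assumptions for illustration, only the pre/postload functions come from this patch:

	/* hypothetical bitstream-programming path */
	static int my_fpga_program(const char *buf, size_t count)
	{
		int err;

		/* isolate the PL: assert FPGA resets, gate level shifters */
		zynq_slcr_init_preload_fpga();

		err = my_write_bitstream(buf, count);	/* assumed helper */

		/* restore PL<->PS communication */
		zynq_slcr_init_postload_fpga();

		return err;
	}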
diff --git a/arch/arm/mach-zynq/suspend.S b/arch/arm/mach-zynq/suspend.S
new file mode 100644
index 000000000000..f3f8440e8018
--- /dev/null
+++ b/arch/arm/mach-zynq/suspend.S
@@ -0,0 +1,185 @@
+/*
+ * Suspend support for Zynq
+ *
+ * Copyright (C) 2012 Xilinx
+ *
+ * Soren Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#define ARMPLL_CTRL_OFFS 0x100
+#define DDRPLL_CTRL_OFFS 0x104
+#define PLLSTATUS_OFFS 0x10c
+#define DDR_CLK_CTRL_OFFS 0x124
+#define DCI_CLK_CTRL_OFFS 0x128
+#define MODE_STS_OFFS 0x54
+
+#define PLL_RESET_MASK 1
+#define PLL_PWRDWN_MASK (1 << 1)
+#define PLL_BYPASS_MASK (1 << 4)
+#define DCICLK_ENABLE_MASK 1
+#define DDRCLK_ENABLE_MASK 3
+#define ARM_LOCK_MASK (1 << 0)
+#define DDR_LOCK_MASK (1 << 1)
+#define DDRC_STATUS_MASK 7
+
+#define DDRC_OPMODE_SR 3
+#define MAXTRIES 100
+
+ .text
+ .align 3
+
+/**
+ * zynq_sys_suspend - Enter suspend
+ * @ddrc_base: Base address of the DDRC
+ * @slcr_base: Base address of the SLCR
+ * Returns -1 if the DRAM subsystem is not gated off, 0 otherwise.
+ *
+ * This function is copied into OCM and finishes the suspend operation, i.e.
+ * the DDR-related clocks are gated off and the DDR PLL is bypassed.
+ */
+ENTRY(zynq_sys_suspend)
+ push {r4 - r7}
+
+ /* Check DDRC is in self-refresh mode */
+ ldr r2, [r0, #MODE_STS_OFFS]
+ and r2, #DDRC_STATUS_MASK
+ cmp r2, #DDRC_OPMODE_SR
+ movweq r3, #0xff00
+ bne suspend
+
+ mov r3, #MAXTRIES
+ movw r4, #0xfff0
+ movt r4, #0x1f
+ /* Wait for command queue empty */
+1: subs r3, #1
+ movweq r3, #0xff00
+ beq suspend
+ dsb sy
+ ldr r2, [r0, #MODE_STS_OFFS]
+ ands r2, r4
+ bne 1b
+
+ dsb sy
+
+ /*
+ * Wait for DDRC pipeline/queues to drain.
+ * We should wait ~40 DDR cycles. DDR is still at full speed while the
+ * CPU might already run in PLL bypass mode. The fastest speed the CPU
+ * runs at is ~1 GHz, roughly twice the DDR speed.
+ */
+ mov r3, #160
+1: nop
+ subs r3, #1
+ bne 1b
+
+ dsb sy
+
+ /* read back CAM status once more */
+ ldr r2, [r0, #MODE_STS_OFFS]
+ ands r2, r4
+ movwne r3, #0xff00
+ bne suspend
+
+ /* Stop DDR clocks */
+ ldr r2, [r1, #DDR_CLK_CTRL_OFFS]
+ bic r2, #DDRCLK_ENABLE_MASK
+ str r2, [r1, #DDR_CLK_CTRL_OFFS]
+
+ dmb st
+
+ ldr r2, [r1, #DCI_CLK_CTRL_OFFS]
+ bic r2, #DCICLK_ENABLE_MASK
+ str r2, [r1, #DCI_CLK_CTRL_OFFS]
+
+ dmb st
+
+ /* Bypass and powerdown DDR PLL */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ orr r2, #PLL_BYPASS_MASK
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+ orr r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+
+ /* Bypass and powerdown ARM PLL */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ orr r2, #PLL_BYPASS_MASK
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+ orr r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+
+suspend:
+ dsb sy
+ wfi
+ dsb sy
+ cmp r3, #0xff00
+ moveq r0, #-1
+ beq exit
+
+ /* Power up ARM PLL */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ bic r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+ /* wait for lock */
+1: ldr r2, [r1, #PLLSTATUS_OFFS]
+ ands r2, #ARM_LOCK_MASK
+ beq 1b
+
+ dsb sy
+
+ /* Disable ARM PLL bypass */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ bic r2, #PLL_BYPASS_MASK
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+
+ dmb st
+
+ /* Power up DDR PLL */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ bic r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+ /* wait for lock */
+1: ldr r2, [r1, #PLLSTATUS_OFFS]
+ ands r2, #DDR_LOCK_MASK
+ beq 1b
+
+ dsb sy
+
+ /* Disable DDR PLL bypass */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ bic r2, #PLL_BYPASS_MASK
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+
+ dmb st
+
+ /* Start DDR clocks */
+ ldr r2, [r1, #DCI_CLK_CTRL_OFFS]
+ orr r2, #DCICLK_ENABLE_MASK
+ str r2, [r1, #DCI_CLK_CTRL_OFFS]
+
+ dmb st
+
+ ldr r2, [r1, #DDR_CLK_CTRL_OFFS]
+ orr r2, #DDRCLK_ENABLE_MASK
+ str r2, [r1, #DDR_CLK_CTRL_OFFS]
+
+ dsb sy
+
+ mov r0, #0
+exit: pop {r4 - r7}
+ bx lr
+
+ENTRY(zynq_sys_suspend_sz)
+ .word . - zynq_sys_suspend
+
+ ENDPROC(zynq_sys_suspend)
diff --git a/arch/arm/mach-zynq/zynq_ocm.c b/arch/arm/mach-zynq/zynq_ocm.c
new file mode 100644
index 000000000000..324b7c125bf5
--- /dev/null
+++ b/arch/arm/mach-zynq/zynq_ocm.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2013 Xilinx
+ *
+ * Based on "Generic on-chip SRAM allocation driver"
+ *
+ * Copyright (C) 2012 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/genalloc.h>
+
+#include "common.h"
+
+#define ZYNQ_OCM_HIGHADDR 0xfffc0000
+#define ZYNQ_OCM_LOWADDR 0x0
+#define ZYNQ_OCM_BLOCK_SIZE 0x10000
+#define ZYNQ_OCM_BLOCKS 4
+#define ZYNQ_OCM_GRANULARITY 32
+
+#define ZYNQ_OCM_PARITY_CTRL 0x0
+#define ZYNQ_OCM_PARITY_ENABLE 0x1e
+
+#define ZYNQ_OCM_PARITY_ERRADDRESS 0x4
+
+#define ZYNQ_OCM_IRQ_STS 0x8
+#define ZYNQ_OCM_IRQ_STS_ERR_MASK 0x7
+
+struct zynq_ocm_dev {
+ void __iomem *base;
+ int irq;
+ struct gen_pool *pool;
+ struct resource res[ZYNQ_OCM_BLOCKS];
+};
+
+/**
+ * zynq_ocm_irq_handler - Interrupt service routine of the OCM controller
+ * @irq: IRQ number
+ * @data: Pointer to the zynq_ocm_dev structure
+ *
+ * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t zynq_ocm_irq_handler(int irq, void *data)
+{
+ u32 sts;
+ u32 err_addr;
+ struct zynq_ocm_dev *zynq_ocm = data;
+
+ /* check status */
+ sts = readl(zynq_ocm->base + ZYNQ_OCM_IRQ_STS);
+ if (sts & ZYNQ_OCM_IRQ_STS_ERR_MASK) {
+ /* check error address */
+ err_addr = readl(zynq_ocm->base + ZYNQ_OCM_PARITY_ERRADDRESS);
+ pr_err("%s: OCM err intr generated at 0x%04x (stat: 0x%08x).",
+ __func__, err_addr, sts & ZYNQ_OCM_IRQ_STS_ERR_MASK);
+ return IRQ_HANDLED;
+ }
+ pr_warn("%s: Interrupt generated by OCM, but no error is found.",
+ __func__);
+
+ return IRQ_NONE;
+}
+
+/**
+ * zynq_ocm_probe - Probe method for the OCM driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_ocm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct zynq_ocm_dev *zynq_ocm;
+ u32 i, ocm_config, curr;
+ struct resource *res;
+
+ ocm_config = zynq_slcr_get_ocm_config();
+
+ zynq_ocm = devm_kzalloc(&pdev->dev, sizeof(*zynq_ocm), GFP_KERNEL);
+ if (!zynq_ocm)
+ return -ENOMEM;
+
+ zynq_ocm->pool = devm_gen_pool_create(&pdev->dev,
+ ilog2(ZYNQ_OCM_GRANULARITY),
+ NUMA_NO_NODE, NULL);
+ if (!zynq_ocm->pool)
+ return -ENOMEM;
+
+ curr = 0; /* index of the current OCM struct resource */
+ for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
+ u32 base, start, end;
+
+ /* Setup base address for 64kB OCM block */
+ if (ocm_config & BIT(i))
+ base = ZYNQ_OCM_HIGHADDR;
+ else
+ base = ZYNQ_OCM_LOWADDR;
+
+ /* Calculate start and end block addresses */
+ start = i * ZYNQ_OCM_BLOCK_SIZE + base;
+ end = start + (ZYNQ_OCM_BLOCK_SIZE - 1);
+
+ /* Concatenate OCM blocks together to get a bigger pool */
+ if (i > 0 && start == (zynq_ocm->res[curr - 1].end + 1)) {
+ zynq_ocm->res[curr - 1].end = end;
+ } else {
+#ifdef CONFIG_SMP
+ /*
+ * An OCM block placed at 0x0 has special meaning
+ * for SMP because the jump trampoline is added there.
+ * Ensure that this address won't be allocated.
+ */
+ if (!base) {
+ u32 trampoline_code_size =
+ &zynq_secondary_trampoline_end -
+ &zynq_secondary_trampoline;
+ dev_dbg(&pdev->dev,
+ "Allocate reset vector table %dB\n",
+ trampoline_code_size);
+ /* postpone start offset */
+ start += trampoline_code_size;
+ }
+#endif
+ /* First resource is always initialized */
+ zynq_ocm->res[curr].start = start;
+ zynq_ocm->res[curr].end = end;
+ zynq_ocm->res[curr].flags = IORESOURCE_MEM;
+ curr++; /* move on to the next resource slot */
+ }
+ dev_dbg(&pdev->dev, "OCM block %d, start %x, end %x\n",
+ i, start, end);
+ }
+
+ /*
+ * Separate pool allocation from OCM block detection to ensure
+ * the biggest possible pool.
+ */
+ for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
+ unsigned long size;
+ void __iomem *virt_base;
+
+ /* Stop at the first zero-sized (unused) resource */
+ if (zynq_ocm->res[i].end == 0)
+ break;
+ dev_dbg(&pdev->dev, "OCM resources %d, start %x, end %x\n",
+ i, zynq_ocm->res[i].start, zynq_ocm->res[i].end);
+ size = resource_size(&zynq_ocm->res[i]);
+ virt_base = devm_ioremap_resource(&pdev->dev,
+ &zynq_ocm->res[i]);
+ if (IS_ERR(virt_base))
+ return PTR_ERR(virt_base);
+
+ ret = gen_pool_add_virt(zynq_ocm->pool,
+ (unsigned long)virt_base,
+ zynq_ocm->res[i].start, size, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Gen pool failed\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "ZYNQ OCM pool: %ld KiB @ 0x%p\n",
+ size / 1024, virt_base);
+ }
+
+ /* Get OCM config space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ zynq_ocm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(zynq_ocm->base))
+ return PTR_ERR(zynq_ocm->base);
+
+ /* Allocate OCM parity IRQ */
+ zynq_ocm->irq = platform_get_irq(pdev, 0);
+ if (zynq_ocm->irq < 0) {
+ dev_err(&pdev->dev, "irq resource not found\n");
+ return zynq_ocm->irq;
+ }
+ ret = devm_request_irq(&pdev->dev, zynq_ocm->irq, zynq_ocm_irq_handler,
+ 0, pdev->name, zynq_ocm);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return ret;
+ }
+
+ /* Enable parity errors */
+ writel(ZYNQ_OCM_PARITY_ENABLE, zynq_ocm->base + ZYNQ_OCM_PARITY_CTRL);
+
+ platform_set_drvdata(pdev, zynq_ocm);
+
+ return 0;
+}
+
+/**
+ * zynq_ocm_remove - Remove method for the OCM driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_ocm_remove(struct platform_device *pdev)
+{
+ struct zynq_ocm_dev *zynq_ocm = platform_get_drvdata(pdev);
+
+ if (gen_pool_avail(zynq_ocm->pool) < gen_pool_size(zynq_ocm->pool))
+ dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
+
+ return 0;
+}
+
+static struct of_device_id zynq_ocm_dt_ids[] = {
+ { .compatible = "xlnx,zynq-ocmc-1.0" },
+ { /* end of table */ }
+};
+
+static struct platform_driver zynq_ocm_driver = {
+ .driver = {
+ .name = "zynq-ocm",
+ .of_match_table = zynq_ocm_dt_ids,
+ },
+ .probe = zynq_ocm_probe,
+ .remove = zynq_ocm_remove,
+};
+
+static int __init zynq_ocm_init(void)
+{
+ return platform_driver_register(&zynq_ocm_driver);
+}
+
+arch_initcall(zynq_ocm_init);
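Once probed, other kernel code can carve memory out of the OCM through the genalloc API, much as zynq_pm_remap_ocm() does above. A minimal consumer sketch (error handling trimmed; the device pointer is assumed to resolve to the OCM controller):

	#include <linux/genalloc.h>

	static void *ocm_alloc_example(struct device *ocm_dev, size_t size)
	{
		struct gen_pool *pool = gen_pool_get(ocm_dev, NULL);
		unsigned long vaddr;

		if (!pool)
			return NULL;

		/* allocations are rounded to the 32-byte pool granularity */
		vaddr = gen_pool_alloc(pool, size);
		return vaddr ? (void *)vaddr : NULL;
	}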
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index d07fc063c930..3f5e0bfed17f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -289,6 +289,8 @@ config ARCH_ZX
config ARCH_ZYNQMP
bool "Xilinx ZynqMP Family"
+ select PINCTRL
+ select PINCTRL_ZYNQMP
select ZYNQMP_FIRMWARE
help
This enables support for Xilinx ZynqMP Family
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index 60f5443f3ef4..bec4746fe721 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -3,6 +3,7 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += avnet-ultra96-rev1.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1232-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1254-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1275-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1275-revB.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm015-dc1.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm016-dc2.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm017-dc3.dtb
@@ -13,5 +14,7 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revB.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-rev1.0.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revC.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu106-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu111-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1285-revA.dtb
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
new file mode 100644
index 000000000000..d4ce8499020c
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Clock specification for Xilinx ZynqMP
+ *
+ * (C) Copyright 2017, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+/ {
+ fclk0: fclk0 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL0_REF>;
+ };
+
+ fclk1: fclk1 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL1_REF>;
+ };
+
+ fclk2: fclk2 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL2_REF>;
+ };
+
+ fclk3: fclk3 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL3_REF>;
+ };
+
+ pss_ref_clk: pss_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <33333333>;
+ };
+
+ video_clk: video_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ pss_alt_ref_clk: pss_alt_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ gt_crx_ref_clk: gt_crx_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <108000000>;
+ };
+
+ aux_ref_clk: aux_ref_clk {
+ u-boot,dm-pre-reloc;
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ dp_aclk: dp_aclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-accuracy = <100>;
+ };
+};
+
+&zynqmp_firmware {
+ zynqmp_clk: clock-controller {
+ u-boot,dm-pre-reloc;
+ #clock-cells = <1>;
+ compatible = "xlnx,zynqmp-clk";
+ clocks = <&pss_ref_clk>, <&video_clk>, <&pss_alt_ref_clk>,
+ <&aux_ref_clk>, <&gt_crx_ref_clk>;
+ clock-names = "pss_ref_clk", "video_clk", "pss_alt_ref_clk",
+ "aux_ref_clk", "gt_crx_ref_clk";
+ };
+};
+
+&can0 {
+ clocks = <&zynqmp_clk CAN0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&can1 {
+ clocks = <&zynqmp_clk CAN1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&cpu0 {
+ clocks = <&zynqmp_clk ACPU>;
+};
+
+&fpd_dma_chan1 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan2 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan3 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan4 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan5 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan6 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan7 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&fpd_dma_chan8 {
+ clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&gpu {
+ clocks = <&zynqmp_clk GPU_REF>, <&zynqmp_clk GPU_PP0_REF>, <&zynqmp_clk GPU_PP1_REF>;
+};
+
+&lpd_dma_chan1 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan2 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan3 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan4 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan5 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan6 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan7 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&lpd_dma_chan8 {
+ clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&nand0 {
+ clocks = <&zynqmp_clk NAND_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&gem0 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM0_REF>, <&zynqmp_clk GEM0_TX>,
+ <&zynqmp_clk GEM0_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem1 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM1_REF>, <&zynqmp_clk GEM1_TX>,
+ <&zynqmp_clk GEM1_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem2 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM2_REF>, <&zynqmp_clk GEM2_TX>,
+ <&zynqmp_clk GEM2_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gem3 {
+ clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM3_REF>, <&zynqmp_clk GEM3_TX>,
+ <&zynqmp_clk GEM3_RX>, <&zynqmp_clk GEM_TSU>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
+};
+
+&gpio {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&i2c0 {
+ clocks = <&zynqmp_clk I2C0_REF>;
+};
+
+&i2c1 {
+ clocks = <&zynqmp_clk I2C1_REF>;
+};
+
+&perf_monitor_ocm {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&perf_monitor_ddr {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>;
+};
+
+&perf_monitor_cci {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>;
+};
+
+&perf_monitor_lpd {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&pcie {
+ clocks = <&zynqmp_clk PCIE_REF>;
+};
+
+&qspi {
+ clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&sata {
+ clocks = <&zynqmp_clk SATA_REF>;
+};
+
+&sdhci0 {
+ clocks = <&zynqmp_clk SDIO0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&sdhci1 {
+ clocks = <&zynqmp_clk SDIO1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&spi0 {
+ clocks = <&zynqmp_clk SPI0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&spi1 {
+ clocks = <&zynqmp_clk SPI1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc0 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc1 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc2 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&ttc3 {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&uart0 {
+ clocks = <&zynqmp_clk UART0_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&uart1 {
+ clocks = <&zynqmp_clk UART1_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
+&usb0 {
+ clocks = <&zynqmp_clk USB0_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+};
+
+&usb1 {
+ clocks = <&zynqmp_clk USB1_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+};
+
+&watchdog0 {
+ clocks = <&zynqmp_clk WDT>;
+};
+
+&lpd_watchdog {
+ clocks = <&zynqmp_clk LPD_WDT>;
+};
+
+&xilinx_ams {
+ clocks = <&zynqmp_clk AMS_REF>;
+};
+
+&zynqmp_dpsub {
+ clocks = <&dp_aclk>, <&zynqmp_clk DP_AUDIO_REF>, <&zynqmp_clk DP_VIDEO_REF>;
+};
+
+&xlnx_dpdma {
+ clocks = <&zynqmp_clk DPDMA_REF>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ clocks = <&zynqmp_clk DP_AUDIO_REF>;
+};
+
+&pcap {
+ clocks = <&zynqmp_clk PCAP>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
index 306ad2157c98..1f45f4290592 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
@@ -12,6 +12,7 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
+ u-boot,dm-pre-reloc;
};
clk125: clk125 {
@@ -24,6 +25,7 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <200000000>;
+ u-boot,dm-pre-reloc;
};
clk250: clk250 {
@@ -58,13 +60,13 @@
clock-accuracy = <100>;
};
- dpdma_clk: dpdma-clk {
+ dpdma_clk: dpdma_clk {
compatible = "fixed-clock";
#clock-cells = <0x0>;
clock-frequency = <533000000>;
};
- drm_clock: drm-clock {
+ drm_clock: drm_clock {
compatible = "fixed-clock";
#clock-cells = <0x0>;
clock-frequency = <262750000>;
@@ -144,6 +146,10 @@
clocks = <&clk600>, <&clk100>;
};
+&nand0 {
+ clocks = <&clk100 &clk100>;
+};
+
&gem0 {
clocks = <&clk125>, <&clk125>, <&clk125>;
};
@@ -172,6 +178,26 @@
clocks = <&clk100>;
};
+&perf_monitor_ocm {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_ddr {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_cci {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_lpd {
+ clocks = <&clk100>;
+};
+
+&qspi {
+ clocks = <&clk300 &clk300>;
+};
+
&sata {
clocks = <&clk250>;
};
@@ -209,5 +235,21 @@
};
&watchdog0 {
+ clocks = <&clk100>;
+};
+
+&lpd_watchdog {
clocks = <&clk250>;
};
+
+&zynqmp_dpsub {
+ clocks = <&dp_aclk>, <&dp_aud_clk>, <&drm_clock>;
+};
+
+&xlnx_dpdma {
+ clocks = <&dpdma_clk>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ clocks = <&dp_aud_clk>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
index 0f7b4cf6078e..5c212ba468e6 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
@@ -10,7 +10,8 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZC1232 RevA";
@@ -19,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -36,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB FIXME */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&sata {
status = "okay";
/* SATA OOB timing settings */
@@ -47,6 +78,8 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane0 PHY_TYPE_SATA 0 0 125000000>, <&lane1 PHY_TYPE_SATA 1 1 125000000>;
};
&uart0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
index 9092828f92ec..881aacc58253 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
@@ -11,7 +11,7 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP ZC1254 RevA";
@@ -20,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -37,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
index 4f404c580eec..7403f153e447 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
@@ -11,7 +11,7 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP ZC1275 RevA";
@@ -20,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -37,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revB.dts
new file mode 100644
index 000000000000..72517aef07ea
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revB.dts
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZC1275 RevB
+ *
+ * (C) Copyright 2018, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZC1275 RevB";
+ compatible = "xlnx,zynqmp-zc1275-revB", "xlnx,zynqmp-zc1275", "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ mmc0 = &sdhci1;
+ ethernet0 = &gem1;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&gem1 {
+ mdio {
+ phy1: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC on AES-FMC-NETW1-G */
+ rxc-skew-ps = <1800>; /* Skew control of RX_CLK pad output */
+ txc-skew-ps = <1800>; /* Skew control of GTX_CLK pad input */
+ txen-skew-ps = <900>; /* Skew control of TX_CTL pad input */
+ rxdv-skew-ps = <0>; /* Skew control of RX_CTL pad output */
+ rxd0-skew-ps = <0>; /* Skew control of RXD0 pad output */
+ rxd1-skew-ps = <0>; /* Skew control of RXD1 pad output */
+ rxd2-skew-ps = <0>; /* Skew control of RXD2 pad output */
+ rxd3-skew-ps = <0>; /* Skew control of RXD3 pad output */
+ txd0-skew-ps = <900>; /* Skew control of TXD0 pad input */
+ txd1-skew-ps = <900>; /* Skew control of TXD1 pad input */
+ txd2-skew-ps = <900>; /* Skew control of TXD2 pad input */
+ txd3-skew-ps = <900>; /* Skew control of TXD3 pad input */
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
index 9a3e39d1294f..3cbeaccd7ba4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
@@ -10,8 +10,10 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm015-dc1 RevA";
@@ -19,11 +21,14 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
@@ -73,6 +78,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: phy@0 {
reg = <0>;
};
@@ -80,12 +87,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
eeprom: eeprom@55 {
compatible = "atmel,24c64"; /* 24AA64 */
@@ -93,6 +110,245 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_9_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_9_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_36_grp", "gpio0_37_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_36_grp", "gpio0_37_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_8_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_8_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO34";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO35";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_0_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio0_wp_0_grp";
+ function = "sdio0_wp";
+ };
+
+ conf-wp {
+ groups = "sdio0_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_38_grp";
+ };
+
+ conf {
+ groups = "gpio0_38_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* Micron MT25QU512ABB8ESF */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -108,24 +364,75 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x19 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 150000000>;
};
/* eMMC */
&sdhci0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
bus-width = <8>;
+ xlnx,mio_bank = <0>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 0 27000000>,
+ <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
index 2421ec71a201..0e3df864fe05 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
@@ -10,8 +10,9 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm016-dc2 RevA";
@@ -21,12 +22,14 @@
can0 = &can0;
can1 = &can1;
ethernet0 = &gem2;
+ gpio0 = &gpio;
i2c0 = &i2c0;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
spi0 = &spi0;
spi1 = &spi1;
+ usb0 = &usb1;
};
chosen {
@@ -42,10 +45,14 @@
&can0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can0_default>;
};
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&fpd_dma_chan1 {
@@ -84,6 +91,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem2_default>;
phy0: phy@5 {
reg = <5>;
ti,rx-internal-delay = <0x8>;
@@ -100,6 +109,11 @@
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 6 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 7 GPIO_ACTIVE_HIGH>;
tca6416_u26: gpio@20 {
compatible = "ti,tca6416";
@@ -115,6 +129,353 @@
};
};
+&nand0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_nand0_default>;
+ arasan,has-mdma;
+
+ nand@0 {
+ reg = <0x0>;
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand-rootfs";
+ reg = <0x0 0x1c00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand-misc";
+ reg = <0x0 0x3400000 0xfcc00000>;
+ };
+ };
+ nand@1 {
+ reg = <0x1>;
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand1-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand1-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand1-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand1-rootfs";
+ reg = <0x0 0x1c00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand1-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand1-misc";
+ reg = <0x0 0x3400000 0xfcc00000>;
+ };
+ };
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_can0_default: can0-default {
+ mux {
+ function = "can0";
+ groups = "can0_9_grp";
+ };
+
+ conf {
+ groups = "can0_9_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO38";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO39";
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_8_grp";
+ };
+
+ conf {
+ groups = "can1_8_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO33";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO32";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_1_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_1_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_6_grp", "gpio0_7_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_6_grp", "gpio0_7_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_10_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_10_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO42";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO43";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_10_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_10_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO41";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO40";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb1_default: usb1-default {
+ mux {
+ groups = "usb1_0_grp";
+ function = "usb1";
+ };
+
+ conf {
+ groups = "usb1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO64", "MIO65", "MIO67";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO66", "MIO68", "MIO69", "MIO70", "MIO71",
+ "MIO72", "MIO73", "MIO74", "MIO75";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem2_default: gem2-default {
+ mux {
+ function = "ethernet2";
+ groups = "ethernet2_0_grp";
+ };
+
+ conf {
+ groups = "ethernet2_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO58", "MIO59", "MIO60", "MIO61", "MIO62",
+ "MIO63";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO52", "MIO53", "MIO54", "MIO55", "MIO56",
+ "MIO57";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio2";
+ groups = "mdio2_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio2_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_nand0_default: nand0-default {
+ mux {
+ groups = "nand0_0_grp";
+ function = "nand0";
+ };
+
+ conf {
+ groups = "nand0_0_grp";
+ bias-pull-up;
+ };
+
+ mux-ce {
+ groups = "nand0_ce_0_grp";
+ function = "nand0_ce";
+ };
+
+ conf-ce {
+ groups = "nand0_ce_0_grp";
+ bias-pull-up;
+ };
+
+ mux-rb {
+ groups = "nand0_rb_0_grp";
+ function = "nand0_rb";
+ };
+
+ conf-rb {
+ groups = "nand0_rb_0_grp";
+ bias-pull-up;
+ };
+
+ mux-dqs {
+ groups = "nand0_dqs_0_grp";
+ function = "nand0_dqs";
+ };
+
+ conf-dqs {
+ groups = "nand0_dqs_0_grp";
+ bias-pull-up;
+ };
+ };
+
+ pinctrl_spi0_default: spi0-default {
+ mux {
+ groups = "spi0_0_grp";
+ function = "spi0";
+ };
+
+ conf {
+ groups = "spi0_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi0_ss_0_grp", "spi0_ss_1_grp",
+ "spi0_ss_2_grp";
+ function = "spi0_ss";
+ };
+
+ conf-cs {
+ groups = "spi0_ss_0_grp", "spi0_ss_1_grp",
+ "spi0_ss_2_grp";
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi1_default: spi1-default {
+ mux {
+ groups = "spi1_3_grp";
+ function = "spi1";
+ };
+
+ conf {
+ groups = "spi1_3_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi1_ss_9_grp", "spi1_ss_10_grp",
+ "spi1_ss_11_grp";
+ function = "spi1_ss";
+ };
+
+ conf-cs {
+ groups = "spi1_ss_9_grp", "spi1_ss_10_grp",
+ "spi1_ss_11_grp";
+ bias-disable;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -122,8 +483,10 @@
&spi0 {
status = "okay";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
- spi0_flash0: flash0@0 {
+ spi0_flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "sst,sst25wf080", "jedec,spi-nor";
@@ -140,8 +503,10 @@
&spi1 {
status = "okay";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
- spi1_flash0: flash0@0 {
+ spi1_flash0: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "atmel,at45db041e", "atmel,at45", "atmel,dataflash";
@@ -158,12 +523,23 @@
/* ULPI SMSC USB3320 */
&usb1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_default>;
+};
+
+&dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
index 7a49deeae647..d6a010355bb8 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
@@ -10,7 +10,7 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP zc1751-xm017-dc3 RevA";
@@ -18,12 +18,15 @@
aliases {
ethernet0 = &gem0;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
+ usb0 = &usb0;
+ usb1 = &usb1;
};
chosen {
@@ -107,6 +110,63 @@
clock-frequency = <400000>;
};
+/* MT29F64G08AECDBJ4-6 */
+&nand0 {
+ status = "okay";
+ arasan,has-mdma;
+ num-cs = <2>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand-rootfs";
+ reg = <0x0 0x1C00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand-misc";
+ reg = <0x0 0x3400000 0xFCC00000>;
+ };
+
+ partition@6 { /* for testing purpose */
+ label = "nand1-fsbl-uboot";
+ reg = <0x1 0x0 0x400000>;
+ };
+ partition@7 { /* for testing purpose */
+ label = "nand1-linux";
+ reg = <0x1 0x400000 0x1400000>;
+ };
+ partition@8 { /* for testing purpose */
+ label = "nand1-device-tree";
+ reg = <0x1 0x1800000 0x400000>;
+ };
+ partition@9 { /* for testing purpose */
+ label = "nand1-rootfs";
+ reg = <0x1 0x1C00000 0x1400000>;
+ };
+ partition@10 { /* for testing purpose */
+ label = "nand1-bitstream";
+ reg = <0x1 0x3000000 0x400000>;
+ };
+ partition@11 { /* for testing purpose */
+ label = "nand1-misc";
+ reg = <0x1 0x3400000 0xFCC00000>;
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
index 54c7b4f1d1e4..fabef11647a4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
@@ -10,22 +10,26 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
/ {
model = "ZynqMP zc1751-xm018-dc4";
compatible = "xlnx,zynqmp-zc1751", "xlnx,zynqmp";
aliases {
+ can0 = &can0;
+ can1 = &can1;
ethernet0 = &gem0;
ethernet1 = &gem1;
ethernet2 = &gem2;
ethernet3 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &qspi;
};
chosen {
@@ -111,6 +115,14 @@
status = "okay";
};
+&zynqmp_dpsub {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
+
&gem0 {
status = "okay";
phy-mode = "rgmii-id";
@@ -151,6 +163,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
clock-frequency = <400000>;
status = "okay";
@@ -161,6 +177,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purposes */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purposes */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purposes */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purposes */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
index b8b5ff13818d..857dfd5c469b 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
@@ -11,8 +11,9 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm019-dc5 RevA";
@@ -20,6 +21,7 @@
aliases {
ethernet0 = &gem1;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci0;
@@ -74,6 +76,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem1_default>;
phy0: phy@0 {
reg = <0>;
};
@@ -85,41 +89,366 @@
&i2c0 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 74 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 75 GPIO_ACTIVE_HIGH>;
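+ /*
+ * The second "gpio" pinctrl state hands SCL/SDA over to the GPIO
+ * block so the I2C core can bit-bang bus recovery through
+ * scl-gpios/sda-gpios when a device holds the bus low.
+ */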
};
&i2c1 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 76 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 77 GPIO_ACTIVE_HIGH>;
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_18_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_18_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_74_grp", "gpio0_75_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_74_grp", "gpio0_75_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_19_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_19_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_76_grp", "gpio0_77_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_76_grp", "gpio0_77_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_17_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
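+ /* RX pad is tri-stated (input); TX pad is driven with biasing off */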
+ conf-rx {
+ pins = "MIO70";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO71";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_18_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_18_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO73";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO72";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem1_default: gem1-default {
+ mux {
+ function = "ethernet1";
+ groups = "ethernet1_0_grp";
+ };
+
+ conf {
+ groups = "ethernet1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO44", "MIO45", "MIO46", "MIO47", "MIO48",
+ "MIO49";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO38", "MIO39", "MIO40", "MIO41", "MIO42",
+ "MIO43";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio1";
+ groups = "mdio1_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_0_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
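+ /* CD/WP are inputs: tri-stated with the internal pull-up enabled */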
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio0_wp_0_grp";
+ function = "sdio0_wp";
+ };
+
+ conf-wp {
+ groups = "sdio0_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_watchdog0_default: watchdog0-default {
+ mux-clk {
+ groups = "swdt0_clk_1_grp";
+ function = "swdt0_clk";
+ };
+
+ conf-clk {
+ groups = "swdt0_clk_1_grp";
+ bias-pull-up;
+ };
+
+ mux-rst {
+ groups = "swdt0_rst_1_grp";
+ function = "swdt0_rst";
+ };
+
+ conf-rst {
+ groups = "swdt0_rst_1_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc0_default: ttc0-default {
+ mux-clk {
+ groups = "ttc0_clk_0_grp";
+ function = "ttc0_clk";
+ };
+
+ conf-clk {
+ groups = "ttc0_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc0_wav_0_grp";
+ function = "ttc0_wav";
+ };
+
+ conf-wav {
+ groups = "ttc0_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc1_default: ttc1-default {
+ mux-clk {
+ groups = "ttc1_clk_0_grp";
+ function = "ttc1_clk";
+ };
+
+ conf-clk {
+ groups = "ttc1_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc1_wav_0_grp";
+ function = "ttc1_wav";
+ };
+
+ conf-wav {
+ groups = "ttc1_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc2_default: ttc2-default {
+ mux-clk {
+ groups = "ttc2_clk_0_grp";
+ function = "ttc2_clk";
+ };
+
+ conf-clk {
+ groups = "ttc2_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc2_wav_0_grp";
+ function = "ttc2_wav";
+ };
+
+ conf-wav {
+ groups = "ttc2_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc3_default: ttc3-default {
+ mux-clk {
+ groups = "ttc3_clk_0_grp";
+ function = "ttc3_clk";
+ };
+
+ conf-clk {
+ groups = "ttc3_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc3_wav_0_grp";
+ function = "ttc3_wav";
+ };
+
+ conf-wav {
+ groups = "ttc3_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
};
&sdhci0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
no-1-8-v;
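+ /*
+ * xlnx,mio_bank: vendor hint naming the MIO bank the SD pads sit
+ * in, used by the SDHCI driver for pad/tap-delay configuration.
+ */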
+ xlnx,mio_bank = <0>;
};
&ttc0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc0_default>;
};
&ttc1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc1_default>;
};
&ttc2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc2_default>;
};
&ttc3 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc3_default>;
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
&watchdog0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_watchdog0_default>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index e5699d0d91e4..e0590d29051f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -11,16 +11,19 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU100 RevC";
compatible = "xlnx,zynqmp-zcu100-revC", "xlnx,zynqmp-zcu100", "xlnx,zynqmp";
aliases {
+ gpio0 = &gpio;
i2c0 = &i2c1;
rtc0 = &rtc;
serial0 = &uart1;
@@ -28,6 +31,8 @@
serial2 = &dcc;
spi0 = &spi0;
spi1 = &spi1;
+ usb0 = &usb0;
+ usb1 = &usb1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
};
@@ -49,11 +54,20 @@
label = "sw4";
gpios = <&gpio 23 GPIO_ACTIVE_LOW>;
linux,code = <KEY_POWER>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
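+ /* Expose the PS AMS/SysMon ADC channels as hwmon sensors */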
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&xilinx_ams 0>, <&xilinx_ams 1>, <&xilinx_ams 2>,
+ <&xilinx_ams 3>, <&xilinx_ams 4>, <&xilinx_ams 5>,
+ <&xilinx_ams 6>, <&xilinx_ams 7>, <&xilinx_ams 8>,
+ <&xilinx_ams 9>, <&xilinx_ams 10>,
+ <&xilinx_ams 11>, <&xilinx_ams 12>;
+ };
+
leds {
compatible = "gpio-leds";
ds2 {
@@ -82,13 +96,22 @@
linux,default-trigger = "bluetooth-power";
};
- vbus-det { /* U5 USB5744 VBUS detection via MIO25 */
+ vbus_det { /* U5 USB5744 VBUS detection via MIO25 */
label = "vbus_det";
gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
};
+ ltc2954: ltc2954 { /* U7 */
+ compatible = "lltc,ltc2954", "lltc,ltc2952";
+ status = "disabled";
+ trigger-gpios = <&gpio 26 GPIO_ACTIVE_LOW>; /* INT line - input */
+ /* If there is a HW watchdog on the mezzanine, this signal should be connected to it */
+ watchdog-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>; /* MIO on PAD */
+ kill-gpios = <&gpio 34 GPIO_ACTIVE_LOW>; /* KILL signal - output */
+ };
+
wmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
regulator-name = "wmmcsdio_fixed";
@@ -98,10 +121,9 @@
regulator-boot-on;
};
- sdio_pwrseq: sdio-pwrseq {
+ sdio_pwrseq: sdio_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
- post-power-on-delay-ms = <10>;
};
};
@@ -140,8 +162,17 @@
"", "", "", "";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 4 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
clock-frequency = <100000>;
i2c-mux@75 { /* u11 */
compatible = "nxp,pca9548";
@@ -218,6 +249,221 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_1_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_1_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_4_grp", "gpio0_5_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_4_grp", "gpio0_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_3_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_3_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_2_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_2_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi0_default: spi0-default {
+ mux {
+ groups = "spi0_3_grp";
+ function = "spi0";
+ };
+
+ conf {
+ groups = "spi0_3_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi0_ss_9_grp";
+ function = "spi0_ss";
+ };
+
+ conf-cs {
+ groups = "spi0_ss_9_grp";
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi1_default: spi1-default {
+ mux {
+ groups = "spi1_0_grp";
+ function = "spi1";
+ };
+
+ conf {
+ groups = "spi1_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi1_ss_0_grp";
+ function = "spi1_ss";
+ };
+
+ conf-cs {
+ groups = "spi1_ss_0_grp";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_0_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO3";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO2";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_0_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO1";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO0";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb1_default: usb1-default {
+ mux {
+ groups = "usb1_0_grp";
+ function = "usb1";
+ };
+
+ conf {
+ groups = "usb1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO64", "MIO65", "MIO67";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO66", "MIO68", "MIO69", "MIO70", "MIO71",
+ "MIO72", "MIO73", "MIO74", "MIO75";
+ bias-disable;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -226,13 +472,18 @@
&sdhci0 {
status = "okay";
no-1-8-v;
- broken-cd; /* CD has to be enabled by default */
disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
+ xlnx,mio_bank = <0>;
};
&sdhci1 {
status = "okay";
bus-width = <0x4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <0>;
non-removable;
disable-wp;
cap-power-off-card;
@@ -248,18 +499,30 @@
};
};
+&serdes {
+ status = "okay";
+};
+
&spi0 { /* Low Speed connector */
status = "okay";
label = "LS-SPI0";
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
};
&spi1 { /* High Speed connector */
status = "okay";
label = "HS-SPI1";
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
bluetooth {
compatible = "ti,wl1831-st";
enable-gpios = <&gpio 8 GPIO_ACTIVE_HIGH>;
@@ -268,19 +531,76 @@
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "peripheral";
+ phy-names = "usb3-phy";
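+ /*
+ * PSGTR lane specifier (vendor binding; semantics assumed):
+ * PHY type, controller instance, reference-clock input and
+ * refclk rate in Hz - lane2 runs USB3 from a 26 MHz reference.
+ */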
+ phys = <&lane2 PHY_TYPE_USB3 0 0 26000000>;
+ maximum-speed = "super-speed";
};
/* ULPI SMSC USB3320 */
&usb1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_default>;
+};
+
+&dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
+ phy-names = "usb3-phy";
+ phys = <&lane3 PHY_TYPE_USB3 1 0 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpsub {
+ status = "okay";
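+ /* Both DP PHY lanes are wired, giving the DisplayPort a 2-lane link */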
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 1 27000000>,
+ <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index 2a3b66547c6d..2741040fe26a 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -10,9 +10,11 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU102 RevA";
@@ -20,6 +22,7 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
@@ -27,11 +30,14 @@
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
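+ /*
+ * xlnx,eeprom: vendor handle letting boot firmware locate the
+ * board identification EEPROM (assumed U-Boot convention).
+ */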
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -46,14 +52,14 @@
label = "sw19";
gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_DOWN>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
leds {
compatible = "gpio-leds";
- heartbeat-led {
+ heartbeat_led {
label = "heartbeat";
gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -63,6 +69,8 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
@@ -105,60 +113,64 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: phy@21 {
reg = <21>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
ti,dp83867-rxctrl-strap-quirk;
+ /* reset-gpios = <&tca6416_u97 6 GPIO_ACTIVE_LOW>; */
};
};
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
reg = <0x20>;
- gpio-controller;
+ gpio-controller; /* IRQ not connected */
#gpio-cells = <2>;
- /*
- * IRQ not connected
- * Lines:
- * 0 - PS_GTR_LAN_SEL0
- * 1 - PS_GTR_LAN_SEL1
- * 2 - PS_GTR_LAN_SEL2
- * 3 - PS_GTR_LAN_SEL3
- * 4 - PCI_CLK_DIR_SEL
- * 5 - IIC_MUX_RESET_B
- * 6 - GEM3_EXP_RESET_B
- * 7, 10 - 17 - not connected
- */
-
- gtr-sel0 {
+ gpio-line-names = "PS_GTR_LAN_SEL0", "PS_GTR_LAN_SEL1", "PS_GTR_LAN_SEL2", "PS_GTR_LAN_SEL3",
+ "PCI_CLK_DIR_SEL", "IIC_MUX_RESET_B", "GEM3_EXP_RESET_B",
+ "", "", "", "", "", "", "", "", "";
+ gtr_sel0 {
gpio-hog;
gpios = <0 0>;
output-low; /* PCIE = 0, DP = 1 */
line-name = "sel0";
};
- gtr-sel1 {
+ gtr_sel1 {
gpio-hog;
gpios = <1 0>;
output-high; /* PCIE = 0, DP = 1 */
line-name = "sel1";
};
- gtr-sel2 {
+ gtr_sel2 {
gpio-hog;
gpios = <2 0>;
output-high; /* PCIE = 0, USB0 = 1 */
line-name = "sel2";
};
- gtr-sel3 {
+ gtr_sel3 {
gpio-hog;
gpios = <3 0>;
output-high; /* PCIE = 0, SATA = 1 */
@@ -169,27 +181,12 @@
tca6416_u61: gpio@21 {
compatible = "ti,tca6416";
reg = <0x21>;
- gpio-controller;
+ gpio-controller; /* IRQ not connected */
#gpio-cells = <2>;
- /*
- * IRQ not connected
- * Lines:
- * 0 - VCCPSPLL_EN
- * 1 - MGTRAVCC_EN
- * 2 - MGTRAVTT_EN
- * 3 - VCCPSDDRPLL_EN
- * 4 - MIO26_PMU_INPUT_LS
- * 5 - PL_PMBUS_ALERT
- * 6 - PS_PMBUS_ALERT
- * 7 - MAXIM_PMBUS_ALERT
- * 10 - PL_DDR4_VTERM_EN
- * 11 - PL_DDR4_VPP_2V5_EN
- * 12 - PS_DIMM_VDDQ_TO_PSVCCO_ON
- * 13 - PS_DIMM_SUSPEND_EN
- * 14 - PS_DDR4_VTERM_EN
- * 15 - PS_DDR4_VPP_2V5_EN
- * 16 - 17 - not connected
- */
+ gpio-line-names = "VCCPSPLL_EN", "MGTRAVCC_EN", "MGTRAVTT_EN", "VCCPSDDRPLL_EN", "MIO26_PMU_INPUT_LS",
+ "PL_PMBUS_ALERT", "PS_PMBUS_ALERT", "MAXIM_PMBUS_ALERT", "PL_DDR4_VTERM_EN",
+ "PL_DDR4_VPP_2V5_EN", "PS_DIMM_VDDQ_TO_PSVCCO_ON", "PS_DIMM_SUSPEND_EN",
+ "PS_DDR4_VTERM_EN", "PS_DDR4_VPP_2V5_EN", "", "";
};
i2c-mux@75 { /* u60 */
@@ -353,7 +350,6 @@
status = "disabled"; /* unreachable */
reg = <0x20>;
};
-
max20751@72 { /* u95 */
compatible = "maxim,max20751";
reg = <0x72>;
@@ -370,6 +366,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -399,6 +400,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "silabs,si5341";
reg = <0x36>;
};
@@ -414,6 +416,7 @@
temperature-stability = <50>;
factory-fout = <300000000>;
clock-frequency = <300000000>;
+ clock-output-names = "si570_user";
};
};
i2c@3 {
@@ -427,6 +430,7 @@
temperature-stability = <50>; /* copy from zc702 */
factory-fout = <156250000>;
clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
};
};
i2c@4 {
@@ -434,6 +438,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
+ compatible = "silabs,si5328";
reg = <0x69>;
/*
* Chip has interrupt present connected to PL
@@ -502,8 +507,302 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux-sw {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf-sw {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22", "MIO23";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
&pcie {
status = "okay";
+ reset-gpio = <&gpio 31 GPIO_ACTIVE_HIGH>;
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
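+ /* is-dual: vendor flag for dual-parallel (two-flash) QSPI wiring */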
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purposes */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purposes */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purposes */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purposes */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
};
&rtc {
@@ -521,27 +820,89 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
index 1780ed237daf..fb235df60ce6 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
@@ -22,6 +22,7 @@
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
ti,dp83867-rxctrl-strap-quirk;
+ /* reset-gpios = <&tca6416_u97 6 GPIO_ACTIVE_LOW>; */
};
/* Cleanup from RevA */
/delete-node/ phy@21;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index 8f456146409f..6b88b9b5bc03 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -10,8 +10,10 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU104 RevA";
@@ -19,12 +21,15 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
@@ -63,9 +68,18 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* Another connection to this bus via PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -85,7 +99,7 @@
* 512B - 768B address 0x56
* 768B - 1024B address 0x57
*/
- eeprom@54 { /* u23 */
+ eeprom: eeprom@54 { /* u23 */
compatible = "atmel,24c08";
reg = <0x54>;
#address-cells = <1>;
@@ -98,6 +112,7 @@
#size-cells = <0>;
reg = <1>;
clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ compatible = "idt,8t49n287";
reg = <0x6c>;
};
};
@@ -107,9 +122,13 @@
#size-cells = <0>;
reg = <2>;
irps5401_43: irps54012@43 { /* IRPS5401 - u175 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x43>;
};
irps5401_4d: irps54012@4d { /* IRPS5401 - u180 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x4d>;
};
};
@@ -118,9 +137,9 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- tca6416_u97: gpio@21 {
+ tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
- reg = <0x21>;
+ reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
/*
@@ -154,6 +173,233 @@
};
};
+&pinctrl0 {
+ status = "okay";
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purposes */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purposes */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purposes */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purposes */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -169,28 +415,90 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
disable-wp;
};
+&serdes {
+ status = "okay";
+};
+
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
new file mode 100644
index 000000000000..1ee284f1e22a
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU104
+ *
+ * (C) Copyright 2017 - 2018, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ model = "ZynqMP ZCU104 RevC";
+ compatible = "xlnx,zynqmp-zcu104-revC", "xlnx,zynqmp-zcu104", "xlnx,zynqmp";
+
+ aliases {
+ ethernet0 = &gem3;
+ gpio0 = &gpio;
+ i2c0 = &i2c1;
+ mmc0 = &sdhci1;
+ rtc0 = &rtc;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&can1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
+};
+
+&dcc {
+ status = "okay";
+};
+
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
+&gem3 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: phy@c {
+ reg = <0xc>;
+ ti,rx-internal-delay = <0x8>;
+ ti,tx-internal-delay = <0xa>;
+ ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
+ };
+};
+
+&gpio {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+
+ tca6416_u97: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /*
+ * IRQ not connected
+ * Lines:
+ * 0 - IRPS5401_ALERT_B
+ * 1 - HDMI_8T49N241_INT_ALM
+ * 2 - MAX6643_OT_B
+ * 3 - MAX6643_FANFAIL_B
+ * 5 - IIC_MUX_RESET_B
+ * 6 - GEM3_EXP_RESET_B
+ * 7 - FMC_LPC_PRSNT_M2C_B
+ * 4, 10 - 17 - not connected
+ */
+ };
+
+ /* This bus is also reachable from the PL I2C through a PCA9306 level translator - u45 */
+ i2c-mux@74 { /* u34 */
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /*
+ * IIC_EEPROM is a 1kB memory organized as 256B blocks,
+ * each block responding at its own I2C address:
+ * 0 - 256B address 0x54
+ * 256B - 512B address 0x55
+ * 512B - 768B address 0x56
+ * 768B - 1024B address 0x57
+ */
+ eeprom: eeprom@54 { /* u23 */
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ compatible = "idt,8t49n287";
+ reg = <0x6c>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ irps5401_43: irps54012@43 { /* IRPS5401 - u175 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
+ reg = <0x43>;
+ };
+ irps5401_4d: irps54012@4d { /* IRPS5401 - u180 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
+ reg = <0x4d>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ ina226@40 { /* u183 */
+ compatible = "ti,ina226";
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+ };
+
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+
+ /* 4, 6 not connected */
+ };
+};
+
+&pinctrl0 {
+ status = "okay";
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purposes */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purposes */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purposes */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purposes */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+ /* SATA OOB timing settings */
+ ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
+};
+
+/* SD1 with level shifter */
+&sdhci1 {
+ status = "okay";
+ no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+ disable-wp;
+};
+
+&serdes {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+};
+
+/* ULPI SMSC USB3320 */
+&usb0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
+};
+
+&watchdog0 {
+ status = "okay";
+};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 93ce7eb81498..32c35869a1a9 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -10,9 +10,11 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU106 RevA";
@@ -20,6 +22,7 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
@@ -27,11 +30,14 @@
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -46,14 +52,14 @@
label = "sw19";
gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_DOWN>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
leds {
compatible = "gpio-leds";
- heartbeat-led {
+ heartbeat_led {
label = "heartbeat";
gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -63,13 +69,14 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
status = "okay";
};
-/* fpd_dma clk 667MHz, lpd_dma 500MHz */
&fpd_dma_chan1 {
status = "okay";
};
@@ -106,6 +113,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
@@ -117,11 +126,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
@@ -344,6 +364,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -373,6 +398,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "si5341";
reg = <0x36>;
};
@@ -388,6 +414,7 @@
temperature-stability = <50>;
factory-fout = <300000000>;
clock-frequency = <300000000>;
+ clock-output-names = "si570_user";
};
};
i2c@3 {
@@ -401,6 +428,7 @@
temperature-stability = <50>; /* copy from zc702 */
factory-fout = <156250000>;
clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
};
};
i2c@4 {
@@ -408,6 +436,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
+ compatible = "silabs,si5328";
reg = <0x69>;
};
};
@@ -480,6 +509,299 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO23", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purposes */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purposes */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purposes */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purposes */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -495,27 +817,76 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
};
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
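For reference, every phys property added above pairs a lane label from the &serdes node (defined later in zynqmp.dtsi with #phy-cells = <4>) with four specifier cells. A minimal sketch of the pattern follows; the cell meanings (PHY type, controller instance, lane, reference-clock frequency in Hz) are inferred from the values used in this patch, not from a binding document, so treat them as an assumption:

	/* Provider: each serdes lane takes a four-cell specifier. */
	lane2: lane2 {
		#phy-cells = <4>;
	};

	/* Consumer: USB3 controller 0 on lane 2 with a 26 MHz refclk
	 * (cell meanings assumed as described above). */
	&dwc3_0 {
		phy-names = "usb3-phy";
		phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
	};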
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index 8bb0001a026f..2d6be96e713d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -10,9 +10,11 @@
/dts-v1/;
#include "zynqmp.dtsi"
-#include "zynqmp-clk.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU111 RevA";
@@ -20,17 +22,21 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -46,14 +52,14 @@
label = "sw19";
gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_DOWN>;
- wakeup-source;
+ gpio-key,wakeup;
autorepeat;
};
};
leds {
compatible = "gpio-leds";
- heartbeat-led {
+ heartbeat_led {
label = "heartbeat";
gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -101,6 +107,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
@@ -112,11 +120,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u22: gpio@20 {
compatible = "ti,tca6416";
@@ -234,12 +253,18 @@
#size-cells = <0>;
reg = <2>;
irps5401_43: irps54012@43 { /* IRPS5401 - u53 check these */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x43>;
};
irps5401_44: irps54012@44 { /* IRPS5401 - u55 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x44>;
};
irps5401_45: irps54012@45 { /* IRPS5401 - u57 */
+ #clock-cells = <0>;
+ compatible = "infineon,irps5401";
reg = <0x45>;
};
/* u68 IR38064 +0 */
@@ -261,6 +286,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
i2c-mux@74 { /* u26 */
compatible = "nxp,pca9548";
@@ -289,6 +319,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u46 */
+ compatible = "si5341";
reg = <0x36>;
};
@@ -304,6 +335,7 @@
temperature-stability = <50>;
factory-fout = <300000000>;
clock-frequency = <300000000>;
+ clock-output-names = "si570_user";
};
};
i2c@3 {
@@ -317,6 +349,7 @@
temperature-stability = <50>;
factory-fout = <156250000>;
clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
};
};
i2c@4 {
@@ -324,6 +357,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 { /* SI5328 - u48 */
+ compatible = "silabs,si5328";
reg = <0x69>;
};
};
@@ -410,6 +444,240 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO23", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@qspi-fsbl-uboot { /* for testing purposes */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@qspi-linux { /* for testing purposes */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@qspi-device-tree { /* for testing purposes */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@qspi-rootfs { /* for testing purposes */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -425,19 +693,67 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
no-1-8-v;
+ disable-wp;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 1 27000000>, <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
};
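Both board files above follow the same two-part pinctrl convention: each named state contains a mux child that binds a pin group to a function, plus one or more conf children that apply electrical settings per group or per pin. A trimmed sketch of one state, using names taken from the patch:

	pinctrl_uart0_default: uart0-default {
		mux {
			groups = "uart0_4_grp";		/* which pins */
			function = "uart0";		/* what they do */
		};
		conf {
			groups = "uart0_4_grp";		/* group-wide settings */
			slew-rate = <SLEW_RATE_SLOW>;
			io-standard = <IO_STANDARD_LVCMOS18>;
		};
		conf-rx {
			pins = "MIO18";			/* per-pin override */
			bias-high-impedance;
		};
	};

A consumer then selects the state with pinctrl-names = "default" and pinctrl-0 = <&pinctrl_uart0_default>, as the uart0 overrides above do.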
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts
new file mode 100644
index 000000000000..13e36d0ba6d5
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZCU1285 RevA
+ *
+ * (C) Copyright 2018 - 2019, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1285 RevA";
+ compatible = "xlnx,zynqmp-zcu1285-revA", "xlnx,zynqmp-zcu1285", "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ mmc0 = &sdhci1;
+ ethernet0 = &gem1; /* EMIO */
+ ethernet1 = &gem3; /* PS ethernet */
+ i2c = &i2c0; /* EMIO */
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ i2c-mux@75 {
+ compatible = "nxp,pca9548"; /* u22 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* PMBUS */
+ max20751@74 { /* u23 */
+ compatible = "maxim,max20751";
+ reg = <0x74>;
+ };
+ max20751@70 { /* u89 */
+ compatible = "maxim,max20751";
+ reg = <0x70>;
+ };
+ max15301@a { /* u28 */
+ compatible = "maxim,max15301";
+ reg = <0xa>;
+ };
+ max15303@b { /* u48 */
+ compatible = "maxim,max15303";
+ reg = <0xb>;
+ };
+ max15303@d { /* u27 */
+ compatible = "maxim,max15303";
+ reg = <0xd>;
+ };
+ max15303@e { /* u11 */
+ compatible = "maxim,max15303";
+ reg = <0xe>;
+ };
+ max15303@f { /* u96 */
+ compatible = "maxim,max15303";
+ reg = <0xf>;
+ };
+ max15303@11 { /* u47 */
+ compatible = "maxim,max15303";
+ reg = <0x11>;
+ };
+ max15303@12 { /* u24 */
+ compatible = "maxim,max15303";
+ reg = <0x12>;
+ };
+ max15301@13 { /* u29 */
+ compatible = "maxim,max15301";
+ reg = <0x13>;
+ };
+ max15303@14 { /* u51 */
+ compatible = "maxim,max15303";
+ reg = <0x14>;
+ };
+ max15303@15 { /* u30 */
+ compatible = "maxim,max15303";
+ reg = <0x15>;
+ };
+ max15303@16 { /* u102 */
+ compatible = "maxim,max15303";
+ reg = <0x16>;
+ };
+ max15301@17 { /* u50 */
+ compatible = "maxim,max15301";
+ reg = <0x17>;
+ };
+ max15301@18 { /* u31 */
+ compatible = "maxim,max15301";
+ reg = <0x18>;
+ };
+ };
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ /* CM_I2C */
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* SYS_EEPROM */
+ eeprom: eeprom@54 { /* u101 */
+ compatible = "atmel,24c32"; /* 24LC32A */
+ reg = <0x54>;
+ };
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* FMC1 */
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* FMC2 */
+ };
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* ANALOG_PMBUS */
+ ina226@40 { /* u60 */
+ compatible = "ti,ina226";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ ina226@41 { /* u61 */
+ compatible = "ti,ina226";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ ina226@42 { /* u63 */
+ compatible = "ti,ina226";
+ reg = <0x42>;
+ shunt-resistor = <1000>;
+ };
+ ina226@43 { /* u65 */
+ compatible = "ti,ina226";
+ reg = <0x43>;
+ shunt-resistor = <1000>;
+ };
+ ina226@44 { /* u64 */
+ compatible = "ti,ina226";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* ANALOG_CM_I2C */
+ };
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ /* FMC3 */
+ };
+ };
+};
+
+&gem1 {
+ mdio {
+ phy1: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC on AES-FMC-NETW1-G */
+ rxc-skew-ps = <1800>; /* Skew control of RX_CLK pad output */
+ txc-skew-ps = <1800>; /* Skew control of GTX_CLK pad input */
+ txen-skew-ps = <900>; /* Skew control of TX_CTL pad input */
+ rxdv-skew-ps = <0>; /* Skew control of RX_CTL pad output */
+ rxd0-skew-ps = <0>; /* Skew control of RXD0 pad output */
+ rxd1-skew-ps = <0>; /* Skew control of RXD1 pad output */
+ rxd2-skew-ps = <0>; /* Skew control of RXD2 pad output */
+ rxd3-skew-ps = <0>; /* Skew control of RXD3 pad output */
+ txd0-skew-ps = <900>; /* Skew control of TXD0 pad input */
+ txd1-skew-ps = <900>; /* Skew control of TXD1 pad input */
+ txd2-skew-ps = <900>; /* Skew control of TXD2 pad input */
+ txd3-skew-ps = <900>; /* Skew control of TXD3 pad input */
+ };
+ };
+};
+
+&gem3 {
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&phy2>;
+ phy2: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC */
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "spi-flash"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
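The PCA9548 usage in this new board file repeats the i2c-mux pattern already seen on the ZCU111: the mux sits at an address on the parent bus and each downstream channel appears as a child i2c@N node selected by reg. A trimmed sketch of the channel carrying the system EEPROM from this file:

	&i2c0 {
		i2c-mux@75 {
			compatible = "nxp,pca9548";
			#address-cells = <1>;
			#size-cells = <0>;
			reg = <0x75>;

			i2c@2 {				/* mux channel 2 */
				#address-cells = <1>;
				#size-cells = <0>;
				reg = <2>;
				eeprom@54 {
					compatible = "atmel,24c32";
					reg = <0x54>;
				};
			};
		};
	};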
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 9aa67340a4d8..c2baf0f5f51d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -12,6 +12,9 @@
* the License, or (at your option) any later version.
*/
+#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
+
/ {
compatible = "xlnx,zynqmp";
#address-cells = <2>;
@@ -71,7 +74,7 @@
};
};
- cpu_opp_table: cpu-opp-table {
+ cpu_opp_table: cpu_opp_table {
compatible = "operating-points-v2";
opp-shared;
opp00 {
@@ -99,6 +102,28 @@
dcc: dcc {
compatible = "arm,dcc";
status = "disabled";
+ u-boot,dm-pre-reloc;
+ };
+
+ zynqmp_ipi {
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 35 4>;
+ xlnx,ipi-id = <0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipi_mailbox_pmu1: mailbox@ff990400 {
+ reg = <0x0 0xff9905c0 0x0 0x20>,
+ <0x0 0xff9905e0 0x0 0x20>,
+ <0x0 0xff990e80 0x0 0x20>,
+ <0x0 0xff990ea0 0x0 0x20>;
+ reg-names = "local_request_region", "local_response_region",
+ "remote_request_region", "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <4>;
+ };
};
pmu {
@@ -115,6 +140,34 @@
method = "smc";
};
+ firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ #power-domain-cells = <0x1>;
+ u-boot,dm-pre-reloc;
+
+ zynqmp_power: zynqmp-power {
+ compatible = "xlnx,zynqmp-power";
+ interrupt-parent = <&gic>;
+ interrupts = <0 35 4>;
+ mboxes = <&ipi_mailbox_pmu1 0>,
+ <&ipi_mailbox_pmu1 1>;
+ mbox-names = "tx", "rx";
+ };
+
+ zynqmp_reset: reset-controller {
+ compatible = "xlnx,zynqmp-reset";
+ #reset-cells = <1>;
+ };
+
+ pinctrl0: pinctrl {
+ compatible = "xlnx,zynqmp-pinctrl";
+ status = "disabled";
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
@@ -124,7 +177,94 @@
<1 10 0xf08>;
};
- amba_apu: amba-apu@0 {
+ edac {
+ compatible = "arm,cortex-a53-edac";
+ };
+
+ fpga_full: fpga-full {
+ compatible = "fpga-region";
+ fpga-mgr = <&pcap>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ };
+
+ nvmem_firmware {
+ compatible = "xlnx,zynqmp-nvmem-fw";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ soc_revision: soc_revision@0 {
+ reg = <0x0 0x4>;
+ };
+ /* efuse access */
+ efuse_dna: efuse_dna@c {
+ reg = <0xc 0xc>;
+ };
+ efuse_usr0: efuse_usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ efuse_usr1: efuse_usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ efuse_usr2: efuse_usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ efuse_usr3: efuse_usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ efuse_usr4: efuse_usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ efuse_usr5: efuse_usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ efuse_usr6: efuse_usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ efuse_usr7: efuse_usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ efuse_miscusr: efuse_miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ efuse_chash: efuse_chash@50 {
+ reg = <0x50 0x4>;
+ };
+ efuse_pufmisc: efuse_pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ efuse_sec: efuse_sec@58 {
+ reg = <0x58 0x4>;
+ };
+ efuse_spkid: efuse_spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ efuse_ppk0hash: efuse_ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ efuse_ppk1hash: efuse_ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
+ };
+
+ pcap: pcap {
+ compatible = "xlnx,zynqmp-pcap-fpga";
+ clock-names = "ref_clk";
+ };
+
+ xlnx_rsa: zynqmp_rsa {
+ compatible = "xlnx,zynqmp-rsa";
+ };
+
+ xlnx_keccak_384: sha384 {
+ compatible = "xlnx,zynqmp-keccak-384";
+ };
+
+ xlnx_aes: zynqmp_aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
+
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -143,8 +283,23 @@
};
};
+ smmu: smmu@fd800000 {
+ compatible = "arm,mmu-500";
+ reg = <0x0 0xfd800000 0x0 0x20000>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ #global-interrupts = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>;
+ };
+
amba: amba {
compatible = "simple-bus";
+ u-boot,dm-pre-reloc;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -158,6 +313,7 @@
interrupt-parent = <&gic>;
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
+ power-domains = <&zynqmp_firmware PD_CAN_0>;
};
can1: can@ff070000 {
@@ -169,6 +325,7 @@
interrupt-parent = <&gic>;
tx-fifo-depth = <0x40>;
rx-fifo-depth = <0x40>;
+ power-domains = <&zynqmp_firmware PD_CAN_1>;
};
cci: cci@fd6e0000 {
@@ -199,6 +356,9 @@
interrupts = <0 124 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e8>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan2: dma@fd510000 {
@@ -209,6 +369,9 @@
interrupts = <0 125 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e9>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan3: dma@fd520000 {
@@ -219,6 +382,9 @@
interrupts = <0 126 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ea>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan4: dma@fd530000 {
@@ -229,6 +395,9 @@
interrupts = <0 127 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14eb>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan5: dma@fd540000 {
@@ -239,6 +408,9 @@
interrupts = <0 128 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ec>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan6: dma@fd550000 {
@@ -249,6 +421,9 @@
interrupts = <0 129 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ed>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan7: dma@fd560000 {
@@ -259,6 +434,9 @@
interrupts = <0 130 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ee>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
};
fpd_dma_chan8: dma@fd570000 {
@@ -269,6 +447,20 @@
interrupts = <0 131 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ef>;
+ power-domains = <&zynqmp_firmware PD_GDMA>;
+ };
+
+ gpu: gpu@fd4b0000 {
+ status = "disabled";
+ compatible = "arm,mali-400", "arm,mali-utgard";
+ reg = <0x0 0xfd4b0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>;
+ interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP0", "IRQPPMMU0", "IRQPP1", "IRQPPMMU1";
+ clock-names = "gpu", "gpu_pp0", "gpu_pp1";
+ power-domains = <&zynqmp_firmware PD_GPU>;
};
/* LPDDMA default allows only secured access. In order to enable
@@ -283,6 +475,9 @@
interrupts = <0 77 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x868>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan2: dma@ffa90000 {
@@ -293,6 +488,9 @@
interrupts = <0 78 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x869>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan3: dma@ffaa0000 {
@@ -303,6 +501,9 @@
interrupts = <0 79 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86a>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan4: dma@ffab0000 {
@@ -313,6 +514,9 @@
interrupts = <0 80 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86b>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan5: dma@ffac0000 {
@@ -323,6 +527,9 @@
interrupts = <0 81 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86c>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan6: dma@ffad0000 {
@@ -333,6 +540,9 @@
interrupts = <0 82 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86d>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan7: dma@ffae0000 {
@@ -343,6 +553,9 @@
interrupts = <0 83 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86e>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
lpd_dma_chan8: dma@ffaf0000 {
@@ -353,6 +566,9 @@
interrupts = <0 84 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86f>; */
+ power-domains = <&zynqmp_firmware PD_ADMA>;
};
mc: memory-controller@fd070000 {
@@ -362,6 +578,20 @@
interrupts = <0 112 4>;
};
+ nand0: nand@ff100000 {
+ compatible = "arasan,nfc-v3p10";
+ status = "disabled";
+ reg = <0x0 0xff100000 0x0 0x1000>;
+ clock-names = "clk_sys", "clk_flash";
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x872>;
+ power-domains = <&zynqmp_firmware PD_NAND>;
+ };
+
gem0: ethernet@ff0b0000 {
compatible = "cdns,zynqmp-gem", "cdns,gem";
status = "disabled";
@@ -371,6 +601,9 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x874>;
+ power-domains = <&zynqmp_firmware PD_ETH_0>;
};
gem1: ethernet@ff0c0000 {
@@ -382,6 +615,9 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x875>;
+ power-domains = <&zynqmp_firmware PD_ETH_1>;
};
gem2: ethernet@ff0d0000 {
@@ -393,6 +629,9 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x876>;
+ power-domains = <&zynqmp_firmware PD_ETH_2>;
};
gem3: ethernet@ff0e0000 {
@@ -404,18 +643,22 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x877>;
+ power-domains = <&zynqmp_firmware PD_ETH_3>;
};
gpio: gpio@ff0a0000 {
compatible = "xlnx,zynqmp-gpio-1.0";
status = "disabled";
#gpio-cells = <0x2>;
- gpio-controller;
interrupt-parent = <&gic>;
interrupts = <0 16 4>;
interrupt-controller;
#interrupt-cells = <2>;
reg = <0x0 0xff0a0000 0x0 0x1000>;
+ gpio-controller;
+ power-domains = <&zynqmp_firmware PD_GPIO>;
};
i2c0: i2c@ff020000 {
@@ -426,6 +669,7 @@
reg = <0x0 0xff020000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_I2C_0>;
};
i2c1: i2c@ff030000 {
@@ -436,6 +680,86 @@
reg = <0x0 0xff030000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_I2C_1>;
+ };
+
+ ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 10 4>;
+ };
+
+ perf_monitor_ocm: perf-monitor@ffa00000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xffa00000 0x0 0x10000>;
+ interrupts = <0 25 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_ddr: perf-monitor@fd0b0000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xfd0b0000 0x0 0x10000>;
+ interrupts = <0 123 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <6>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <0>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <10>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_cci: perf-monitor@fd490000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xfd490000 0x0 0x10000>;
+ interrupts = <0 123 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <0>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_lpd: perf-monitor@ffa10000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xffa10000 0x0 0x10000>;
+ interrupts = <0 25 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
};
pcie: pcie@fd0e0000 {
@@ -467,6 +791,7 @@
<0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+ power-domains = <&zynqmp_firmware PD_PCIE>;
pcie_intc: legacy-interrupt-controller {
interrupt-controller;
#address-cells = <0>;
@@ -474,6 +799,23 @@
};
};
+ qspi: spi@ff0f0000 {
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-qspi-1.0";
+ status = "disabled";
+ clock-names = "ref_clk", "pclk";
+ interrupts = <0 15 4>;
+ interrupt-parent = <&gic>;
+ num-cs = <1>;
+ reg = <0x0 0xff0f0000 0x0 0x1000>,
+ <0x0 0xc0000000 0x0 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x873>;
+ power-domains = <&zynqmp_firmware PD_QSPI>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -484,43 +826,87 @@
calibration = <0x8000>;
};
+ serdes: zynqmp_phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+ resets = <&zynqmp_reset ZYNQMP_RESET_SATA>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_APB>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_APB>,
+ <&zynqmp_reset ZYNQMP_RESET_DP>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM0>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM1>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM2>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM3>;
+ reset-names = "sata_rst", "usb0_crst", "usb1_crst",
+ "usb0_hibrst", "usb1_hibrst", "usb0_apbrst",
+ "usb1_apbrst", "dp_rst", "gem0_rst",
+ "gem1_rst", "gem2_rst", "gem3_rst";
+ lane0: lane0 {
+ #phy-cells = <4>;
+ };
+ lane1: lane1 {
+ #phy-cells = <4>;
+ };
+ lane2: lane2 {
+ #phy-cells = <4>;
+ };
+ lane3: lane3 {
+ #phy-cells = <4>;
+ };
+ };
+
sata: ahci@fd0c0000 {
compatible = "ceva,ahci-1v84";
status = "disabled";
reg = <0x0 0xfd0c0000 0x0 0x2000>;
interrupt-parent = <&gic>;
interrupts = <0 133 4>;
+ power-domains = <&zynqmp_firmware PD_SATA>;
+ #stream-id-cells = <4>;
+ /* iommus = <&smmu 0x4c0>, <&smmu 0x4c1>, */
+ /* <&smmu 0x4c2>, <&smmu 0x4c3>; */
+ /* dma-coherent; */
};
sdhci0: mmc@ff160000 {
- compatible = "arasan,sdhci-8.9a";
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 48 4>;
reg = <0x0 0xff160000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
+ xlnx,device_id = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x870>;
+ power-domains = <&zynqmp_firmware PD_SD_0>;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
};
sdhci1: mmc@ff170000 {
- compatible = "arasan,sdhci-8.9a";
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 49 4>;
reg = <0x0 0xff170000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
- };
-
- smmu: smmu@fd800000 {
- compatible = "arm,mmu-500";
- reg = <0x0 0xfd800000 0x0 0x20000>;
- status = "disabled";
- #global-interrupts = <1>;
- interrupt-parent = <&gic>;
- interrupts = <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>;
+ xlnx,device_id = <1>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x871>;
+ power-domains = <&zynqmp_firmware PD_SD_1>;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
};
spi0: spi@ff040000 {
@@ -532,6 +918,7 @@
clock-names = "ref_clk", "pclk";
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_SPI_0>;
};
spi1: spi@ff050000 {
@@ -543,6 +930,7 @@
clock-names = "ref_clk", "pclk";
#address-cells = <1>;
#size-cells = <0>;
+ power-domains = <&zynqmp_firmware PD_SPI_1>;
};
ttc0: timer@ff110000 {
@@ -552,6 +940,7 @@
interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
reg = <0x0 0xff110000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_0>;
};
ttc1: timer@ff120000 {
@@ -561,6 +950,7 @@
interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
reg = <0x0 0xff120000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_1>;
};
ttc2: timer@ff130000 {
@@ -570,6 +960,7 @@
interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
reg = <0x0 0xff130000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_2>;
};
ttc3: timer@ff140000 {
@@ -579,42 +970,90 @@
interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
reg = <0x0 0xff140000 0x0 0x1000>;
timer-width = <32>;
+ power-domains = <&zynqmp_firmware PD_TTC_3>;
};
uart0: serial@ff000000 {
+ u-boot,dm-pre-reloc;
compatible = "cdns,uart-r1p12", "xlnx,xuartps";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 21 4>;
reg = <0x0 0xff000000 0x0 0x1000>;
clock-names = "uart_clk", "pclk";
+ power-domains = <&zynqmp_firmware PD_UART_0>;
};
uart1: serial@ff010000 {
+ u-boot,dm-pre-reloc;
compatible = "cdns,uart-r1p12", "xlnx,xuartps";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 22 4>;
reg = <0x0 0xff010000 0x0 0x1000>;
clock-names = "uart_clk", "pclk";
+ power-domains = <&zynqmp_firmware PD_UART_1>;
};
- usb0: usb@fe200000 {
- compatible = "snps,dwc3";
+ usb0: usb0@ff9d0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 65 4>;
- reg = <0x0 0xfe200000 0x0 0x40000>;
- clock-names = "clk_xin", "clk_ahb";
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
+ power-domains = <&zynqmp_firmware PD_USB_0>;
+ ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ dwc3_0: dwc3@fe200000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xfe200000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "dwc_usb3", "otg", "hiber";
+ interrupts = <0 65 4>, <0 69 4>, <0 75 4>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x860>;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,refclk_fladj;
+ snps,enable_guctl1_resume_quirk;
+ snps,enable_guctl1_ipd_quirk;
+ snps,xhci-stream-quirk;
+ /* dma-coherent; */
+ /* snps,enable-hibernation; */
+ };
};
- usb1: usb@fe300000 {
- compatible = "snps,dwc3";
+ usb1: usb1@ff9e0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 70 4>;
- reg = <0x0 0xfe300000 0x0 0x40000>;
- clock-names = "clk_xin", "clk_ahb";
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9e0000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
+ power-domains = <&zynqmp_firmware PD_USB_1>;
+ ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ dwc3_1: dwc3@fe300000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xfe300000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "dwc_usb3", "otg", "hiber";
+ interrupts = <0 70 4>, <0 74 4>, <0 76 4>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x861>;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,refclk_fladj;
+ snps,enable_guctl1_resume_quirk;
+ snps,enable_guctl1_ipd_quirk;
+ snps,xhci-stream-quirk;
+ /* dma-coherent; */
+ };
};
watchdog0: watchdog@fd4d0000 {
@@ -623,7 +1062,130 @@
interrupt-parent = <&gic>;
interrupts = <0 113 1>;
reg = <0x0 0xfd4d0000 0x0 0x1000>;
+ timeout-sec = <60>;
+ reset-on-timeout;
+ };
+
+ lpd_watchdog: watchdog@ff150000 {
+ compatible = "cdns,wdt-r1p2";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 52 1>;
+ reg = <0x0 0xff150000 0x0 0x1000>;
timeout-sec = <10>;
};
+
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ interrupt-names = "ams-irq";
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ reg-names = "ams-base";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #io-channel-cells = <1>;
+ ranges;
+
+ ams_ps: ams_ps@ffa50800 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ status = "disabled";
+ reg = <0x0 0xffa50800 0x0 0x400>;
+ };
+
+ ams_pl: ams_pl@ffa50c00 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ status = "disabled";
+ reg = <0x0 0xffa50c00 0x0 0x400>;
+ };
+ };
+
+ xlnx_dpdma: dma@fd4c0000 {
+ compatible = "xlnx,dpdma";
+ status = "disabled";
+ reg = <0x0 0xfd4c0000 0x0 0x1000>;
+ interrupts = <0 122 4>;
+ interrupt-parent = <&gic>;
+ clock-names = "axi_clk";
+ power-domains = <&zynqmp_firmware PD_DP>;
+ dma-channels = <6>;
+ #dma-cells = <1>;
+ dma-video0channel {
+ compatible = "xlnx,video0";
+ };
+ dma-video1channel {
+ compatible = "xlnx,video1";
+ };
+ dma-video2channel {
+ compatible = "xlnx,video2";
+ };
+ dma-graphicschannel {
+ compatible = "xlnx,graphics";
+ };
+ dma-audio0channel {
+ compatible = "xlnx,audio0";
+ };
+ dma-audio1channel {
+ compatible = "xlnx,audio1";
+ };
+ };
+
+ zynqmp_dpsub: zynqmp-display@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ status = "disabled";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+
+ clock-names = "dp_apb_clk", "dp_aud_clk",
+ "dp_vtc_pixel_clk_in";
+
+ power-domains = <&zynqmp_firmware PD_DP>;
+
+ vid-layer {
+ dma-names = "vid0", "vid1", "vid2";
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ };
+
+ gfx-layer {
+ dma-names = "gfx0";
+ dmas = <&xlnx_dpdma 3>;
+ };
+
+ /* dummy node to indicate there's no child i2c device */
+ i2c-bus {
+ };
+
+ zynqmp_dp_snd_codec0: zynqmp_dp_snd_codec0 {
+ compatible = "xlnx,dp-snd-codec";
+ clock-names = "aud_clk";
+ };
+
+ zynqmp_dp_snd_pcm0: zynqmp_dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
+
+ zynqmp_dp_snd_pcm1: zynqmp_dp_snd_pcm1 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 5>;
+ dma-names = "tx";
+ };
+
+ zynqmp_dp_snd_card0: zynqmp_dp_snd_card {
+ compatible = "xlnx,dp-snd-card";
+ xlnx,dp-snd-pcm = <&zynqmp_dp_snd_pcm0>,
+ <&zynqmp_dp_snd_pcm1>;
+ xlnx,dp-snd-codec = <&zynqmp_dp_snd_codec0>;
+ };
+ };
};
};
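A recurring change in zynqmp.dtsi above is attaching every peripheral to the firmware power-domain provider. The shape of the pattern, as a minimal sketch (the PD_* constants come from the dt-bindings/power header included at the top of the file):

	firmware {
		zynqmp_firmware: zynqmp-firmware {
			compatible = "xlnx,zynqmp-firmware";
			method = "smc";
			#power-domain-cells = <1>;	/* one cell: the PD_* id */
		};
	};

	&uart0 {
		/* Consumer: UART0 is powered through the PMU firmware. */
		power-domains = <&zynqmp_firmware PD_UART_0>;
	};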
diff --git a/arch/arm64/configs/xilinx_versal_defconfig b/arch/arm64/configs/xilinx_versal_defconfig
new file mode 100644
index 000000000000..227b2e4a966e
--- /dev/null
+++ b/arch/arm64/configs/xilinx_versal_defconfig
@@ -0,0 +1,178 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_NR_CPUS=8
+# CONFIG_EFI is not set
+CONFIG_COMPAT=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM=y
+CONFIG_MODULES=y
+# CONFIG_SPARSEMEM_VMEMMAP is not set
+# CONFIG_COMPACTION is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_DP83848_PHY=y
+CONFIG_DP83867_PHY=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_ZYNQ=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_DMADEVICES=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_XILINX_AI_ENGINE=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CARVEOUT_HEAP=y
+CONFIG_ION_CHUNK_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+# CONFIG_COMMON_CLK_XGENE is not set
+CONFIG_COMMON_CLK_ZYNQMP=y
+# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
+# CONFIG_FSL_ERRATUM_A008585 is not set
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_CCI_PMU=y
+# CONFIG_ARM_PMU is not set
+CONFIG_ANDROID=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arm64/configs/xilinx_zynqmp_defconfig b/arch/arm64/configs/xilinx_zynqmp_defconfig
new file mode 100644
index 000000000000..2be78e75a1e8
--- /dev/null
+++ b/arch/arm64/configs/xilinx_zynqmp_defconfig
@@ -0,0 +1,399 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX_NWL=y
+CONFIG_NR_CPUS=8
+# CONFIG_DMI is not set
+CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+CONFIG_BT_HCIBFUSB=y
+CONFIG_BT_HCIVHCI=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_BT_ATH3K=y
+CONFIG_BT_WILINK=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_REG_RELAX_NO_IR=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ARASAN=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_XILINX_SDFEC=y
+CONFIG_XILINX_JESD204B=y
+CONFIG_XILINX_JESD204B_PHY=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_TI_ST=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_AMD_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_BCM7XXX_PHY=y
+CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_STE10XP=y
+CONFIG_VITESSE_PHY=y
+CONFIG_XILINX_GMII2RGMII=y
+CONFIG_USB_USBNET=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SPI=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_MAX310X=y
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_I2C_XILINX=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_TPS65086=y
+CONFIG_POWER_RESET_LTC2952=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_MAX20751=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_MFD_TPS65086=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS65086=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_AXI4S_SWITCH=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_DEMOSAIC=y
+CONFIG_VIDEO_XILINX_GAMMA=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_MULTISCALER=y
+CONFIG_VIDEO_XILINX_SDIRXSS=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_XILINX_VPSS_CSC=y
+CONFIG_VIDEO_XILINX_VPSS_SCALER=y
+CONFIG_VIDEO_XILINX_CSI2RXSS=y
+CONFIG_VIDEO_XILINX_SCD=y
+CONFIG_VIDEO_XILINX_M2M=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+CONFIG_DRM_XILINX=y
+CONFIG_DRM_XILINX_SDI=y
+CONFIG_DRM_XLNX=y
+CONFIG_DRM_XLNX_BRIDGE=y
+CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS=y
+CONFIG_DRM_ZYNQMP_DPSUB=y
+CONFIG_DRM_XLNX_DSI=y
+CONFIG_DRM_XLNX_MIXER=y
+CONFIG_DRM_XLNX_PL_DISP=y
+CONFIG_DRM_XLNX_SDI=y
+CONFIG_DRM_XLNX_BRIDGE_CSC=y
+CONFIG_DRM_XLNX_BRIDGE_SCALER=y
+CONFIG_FB_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XILINX_DP=y
+CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=y
+CONFIG_SND_SOC_XILINX_SDI=y
+CONFIG_SND_SOC_XILINX_I2S=y
+CONFIG_SND_SOC_XILINX_SPDIF=y
+CONFIG_SND_SOC_XILINX_PL_SND_CARD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_EDAC_ZYNQMP_OCM=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_DMADEVICES=y
+CONFIG_XILINX_DMA_ENGINES=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_XILINX_APM=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CARVEOUT_HEAP=y
+CONFIG_ION_CHUNK_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+CONFIG_COMMON_CLK_XLNX_CLKWZRD=y
+CONFIG_XILINX_FCLK=y
+CONFIG_XLNX_CTRL_FRMBUF=y
+CONFIG_XLNX_CTRL_VPSS=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_COMMON_CLK_SI5324=y
+# CONFIG_COMMON_CLK_XGENE is not set
+CONFIG_COMMON_CLK_ZYNQMP=y
+CONFIG_ARM_SMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQMP_R5_REMOTEPROC=m
+CONFIG_XILINX_VCU=m
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_XILINX_AMS=y
+CONFIG_XILINX_INTC=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_NVMEM_ZYNQMP=y
+CONFIG_FPGA=y
+CONFIG_XILINX_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_FPGA_MGR_ZYNQMP_FPGA=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_DEV_ZYNQMP_SHA3=y
+CONFIG_CRYPTO_DEV_XILINX_RSA=y
+CONFIG_CRYPTO_DEV_ZYNQMP_AES=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f11433daab4a..5063545e744c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -42,6 +42,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
+ select SPARSE_IRQ
select MMU_GATHER_NO_RANGE if MMU
# Endianness selection
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 92fd4e95b488..47ad2aa03fa7 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -5,15 +5,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
+CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_EFI_PARTITION is not set
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -27,12 +22,19 @@ CONFIG_CMDLINE_FORCE=y
CONFIG_HIGHMEM=y
CONFIG_PCI=y
CONFIG_PCI_XILINX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -70,12 +72,14 @@ CONFIG_UIO_PDRV_GENIRQ=y
CONFIG_UIO_DMEM_GENIRQ=y
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_CIFS=y
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
+CONFIG_KEYS=y
+CONFIG_ENCRYPTED_KEYS=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
CONFIG_DETECT_HUNG_TASK=y
@@ -83,6 +87,3 @@ CONFIG_DEBUG_SPINLOCK=y
CONFIG_KGDB=y
CONFIG_KGDB_TESTS=y
CONFIG_KGDB_KDB=y
-CONFIG_EARLY_PRINTK=y
-CONFIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=y
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 06d69a6e192d..45f2e6bb55d1 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -7,15 +7,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_BASE_FULL is not set
+CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_EFI_PARTITION is not set
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
@@ -27,6 +22,11 @@ CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE_FORCE=y
CONFIG_PCI=y
CONFIG_PCI_XILINX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -75,11 +75,6 @@ CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=y
CONFIG_CRYPTO_ECB=y
@@ -87,3 +82,7 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index e5c9170a07fc..83417105c00a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
+generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index d785defeeed5..eac2fb4b3fb9 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MICROBLAZE_IRQ_H
#define _ASM_MICROBLAZE_IRQ_H
-#define NR_IRQS (32 + 1)
#include <asm-generic/irq.h>
struct pt_regs;
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index f264fdcf152a..7d2894418691 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -99,7 +99,7 @@ big_endian:
_prepare_copy_fdt:
or r11, r0, r0 /* increment */
ori r4, r0, TOPHYS(_fdt_start)
- ori r3, r0, (0x8000 - 4)
+ ori r3, r0, (0x10000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
sw r12, r4, r11 /* addr[r4 + r11] = r12 */
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index e1f3e8741292..71072c5cf61f 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -46,7 +46,7 @@ SECTIONS {
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
- . = _fdt_start + 0x8000; /* Pad up to 32kbyte */
+ . = _fdt_start + 0x10000; /* Pad up to 64kbyte */
_fdt_end = . ;
}
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 58cc4965bd3e..d3c28ffd87db 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -141,7 +141,7 @@ struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
return NULL;
}
-void pcibios_set_master(struct pci_dev *dev)
+void __weak pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
@@ -559,37 +559,7 @@ int pci_proc_domain(struct pci_bus *bus)
*/
static void pcibios_fixup_resources(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- int i;
- if (!hose) {
- pr_err("No host bridge for PCI dev %s !\n",
- pci_name(dev));
- return;
- }
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- struct resource *res = dev->resource + i;
- if (!res->flags)
- continue;
- if (res->start == 0) {
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
- pr_debug("is unassigned\n");
- res->end -= res->start;
- res->start = 0;
- res->flags |= IORESOURCE_UNSET;
- continue;
- }
-
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 1d4b0157ee5d..1880336bba39 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -255,6 +255,13 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
if (!type->setauthsize)
goto unlock;
err = type->setauthsize(ask->private, optlen);
+ break;
+ case ALG_SET_KEY_TYPE:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+ if (!type->setkeytype)
+ goto unlock;
+ err = type->setkeytype(ask->private, optval, optlen);
}
unlock:
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 81c4022285a7..36c8a0d14797 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -309,6 +309,12 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
return crypto_skcipher_setkey(private, key, keylen);
}
+static int skcipher_setkeytype(void *private, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_skcipher_setkeytype(private, key, keylen);
+}
+
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
@@ -368,6 +374,7 @@ static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
+ .setkeytype = skcipher_setkeytype,
.accept = skcipher_accept_parent,
.accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 48a33817de11..c051723d9cec 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -403,6 +403,14 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
return cipher->setkey(tfm, key, keylen);
}
+static int setkeytype(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
+
+ return cipher->setkeytype(tfm, key, keylen);
+}
+
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
@@ -473,6 +481,7 @@ static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
unsigned long addr;
crt->setkey = setkey;
+ crt->setkeytype = setkeytype;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 333df4824753..c978f1282662 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -617,6 +617,23 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
return 0;
}
+static int skcipher_setkeytype_blkcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_blkcipher *blkcipher = *ctx;
+ int err;
+
+ crypto_blkcipher_clear_flags(blkcipher, ~0);
+ crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_blkcipher_setkeytype(blkcipher, key, keylen);
+ crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+ CRYPTO_TFM_RES_MASK);
+
+ return err;
+}
+
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
int (*crypt)(struct blkcipher_desc *,
struct scatterlist *,
@@ -683,6 +700,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
tfm->exit = crypto_exit_skcipher_ops_blkcipher;
skcipher->setkey = skcipher_setkey_blkcipher;
+ skcipher->setkeytype = skcipher_setkeytype_blkcipher;
skcipher->encrypt = skcipher_encrypt_blkcipher;
skcipher->decrypt = skcipher_decrypt_blkcipher;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index c04f5f9e1ed0..404f0e3e1b76 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -606,6 +606,7 @@ static int ll_setup(struct hci_uart *hu)
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
msleep(5);
gpiod_set_value_cansleep(lldev->enable_gpio, 1);
+ msleep(500);
err = serdev_device_wait_for_cts(serdev, true, 200);
if (err) {
bt_dev_err(hu->hdev, "Failed to get CTS");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index fc1e0cf44995..02d661026827 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -129,6 +129,39 @@ config COMMON_CLK_SI570
This driver supports Silicon Labs 570/571/598/599 programmable
clock generators.
+config COMMON_CLK_SI5324
+ tristate "Clock driver for SiLabs 5324 and compatible devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ This driver supports Silicon Labs 5324/5319/5328 programmable
+ clock generators. Dynamic programming of the oscillator is done
+ via I2C.
+
+config COMMON_CLK_IDT8T49N24X
+ tristate "Clock driver for IDT 8T49N24x"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ This driver supports the IDT 8T49N24x universal frequency translator
+ product family. The only chip in the family that is currently
+ supported is the 8T49N241. The driver supports setting the rate for
+ all four outputs on the chip and automatically calculating/setting
+ the appropriate VCO value.
+
+ The driver can read a full register map from the DT,
+ and will use that register map to initialize the attached part
+ (via I2C) when the system boots. Any configuration not supported
+ by the common clock framework must be done via the full register
+ map, including optimized settings.
+
+ All outputs are currently assumed to be LVDS, unless overridden
+ in the full register map in the DT.
+
config COMMON_CLK_CDCE706
tristate "Clock driver for TI CDCE706 clock synthesizer"
depends on I2C
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 9ef4305d55e0..c0a784bc56b9 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
+obj-$(CONFIG_COMMON_CLK_SI5324) += si5324drv.o clk-si5324.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
@@ -75,6 +76,7 @@ obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
+obj-$(CONFIG_COMMON_CLK_IDT8T49N24X) += idt/
obj-y += imgtec/
obj-y += imx/
obj-y += ingenic/
@@ -115,4 +117,4 @@ obj-$(CONFIG_X86) += x86/
endif
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
-obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
+obj-$(CONFIG_COMMON_CLK_ZYNQMP) += zynqmp/
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 8b343e59dc61..4ef027f62880 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -211,8 +211,9 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
{
_of_fixed_factor_clk_setup(node);
}
-CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
- of_fixed_factor_clk_setup);
+
+CLK_OF_DECLARE_DRIVER(fixed_factor_clk, "fixed-factor-clock",
+ of_fixed_factor_clk_setup);
static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
diff --git a/drivers/clk/clk-si5324.c b/drivers/clk/clk-si5324.c
new file mode 100644
index 000000000000..7cfe75d7e6a4
--- /dev/null
+++ b/drivers/clk/clk-si5324.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * clk-si5324.c - Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ * Leon Woestenberg <leon@sidebranch.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "clk-si5324.h"
+#include "si5324.h"
+#include "si5324drv.h"
+
+struct si5324_driver_data;
+
+/**
+ * struct si5324_parameters - si5324 core parameters
+ *
+ * @n1_hs_min: Minimum high-speed n1 output divider
+ * @n1_hs_max: Maximum high-speed n1 output divider
+ * @n1_hs: n1 high-speed output divider
+ * @nc1_ls_min: Minimum low-speed clkout1 output divider
+ * @nc1_ls_max: Maximum low-speed clkout1 output divider
+ * @nc1_ls: Clkout1 low-speed output divider
+ * @nc2_ls_min: Minimum low-speed clkout2 output divider
+ * @nc2_ls_max: Maximum low-speed clkout2 output divider
+ * @nc2_ls: Clkout2 low-speed output divider
+ * @n2_hs: High-speed feedback divider
+ * @n2_ls_min: Minimum low-speed feedback divider
+ * @n2_ls_max: Maximum low-speed feedback divider
+ * @n2_ls: Low-speed feedback divider
+ * @n31_min: Minimum input divider for clk1
+ * @n31_max: Maximum input divider for clk1
+ * @n31: Input divider for clk1
+ * @n32_min: Minimum input divider for clk2
+ * @n32_max: Maximum input divider for clk2
+ * @n32: Input divider for clk2
+ * @fin: Input frequency
+ * @fout: Output frequency
+ * @fosc: Osc frequency
+ * @best_delta_fout: Delta out frequency
+ * @best_fout: Best output frequency
+ * @best_n1_hs: Best high speed output divider
+ * @best_nc1_ls: Best low speed clkout1 divider
+ * @best_n2_hs: Best high speed feedback divider
+ * @best_n2_ls: Best low speed feedback divider
+ * @best_n3: Best input clock divider
+ * @valid: Validity of the stored parameters
+ */
+struct si5324_parameters {
+ u32 n1_hs_min;
+ u32 n1_hs_max;
+ u32 n1_hs;
+ u32 nc1_ls_min;
+ u32 nc1_ls_max;
+ u32 nc1_ls;
+ u32 nc2_ls_min;
+ u32 nc2_ls_max;
+ u32 nc2_ls;
+ u32 n2_hs;
+ u32 n2_ls_min;
+ u32 n2_ls_max;
+ u32 n2_ls;
+ u32 n31_min;
+ u32 n31_max;
+ u32 n31;
+ u32 n32_min;
+ u32 n32_max;
+ u32 n32;
+ u64 fin;
+ u64 fout;
+ u64 fosc;
+ u64 best_delta_fout;
+ u64 best_fout;
+ u32 best_n1_hs;
+ u32 best_nc1_ls;
+ u32 best_n2_hs;
+ u32 best_n2_ls;
+ u32 best_n3;
+ int valid;
+};
+
+/**
+ * struct si5324_hw_data - Clock parameters
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @drvdata: Driver private data
+ * @num: Differential pair clock number
+ */
+struct si5324_hw_data {
+ struct clk_hw hw;
+ struct si5324_driver_data *drvdata;
+ unsigned char num;
+};
+
+/**
+ * struct si5324_driver_data - Driver parameters
+ * @client: I2C client pointer
+ * @regmap: Device's regmap
+ * @onecell: Clock onecell data
+ * @params: Device parameters
+ * @pxtal: Clock
+ * @pxtal_name: Clock name
+ * @xtal: Reference clock
+ * @pclkin1: Clock in 1
+ * @pclkin1_name: Clock in 1 name
+ * @clkin1: Differential input clock 1
+ * @pclkin2: Clock in 2
+ * @pclkin2_name: Clock in 2 name
+ * @clkin2: Differential input clock 2
+ * @pll: Pll clock
+ * @clkout: Output clock
+ * @rate_clkout0: Clock out 0 rate
+ * @rate_clkout1: Clock out 1 rate
+ */
+struct si5324_driver_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct clk_onecell_data onecell;
+ struct si5324_parameters params;
+ struct clk *pxtal;
+ const char *pxtal_name;
+ struct clk_hw xtal;
+ struct clk *pclkin1;
+ const char *pclkin1_name;
+ struct clk_hw clkin1;
+ struct clk *pclkin2;
+ const char *pclkin2_name;
+ struct clk_hw clkin2;
+ struct si5324_hw_data pll;
+ struct si5324_hw_data *clkout;
+ unsigned long rate_clkout0;
+ unsigned long rate_clkout1;
+};
+
+static const char * const si5324_input_names[] = {
+ "xtal", "clkin1", "clkin2"
+};
+
+static const char * const si5324_pll_name = "pll";
+
+static const char * const si5324_clkout_names[] = {
+ "clk0", "clk1"
+};
+
+enum si53xx_variant {
+ si5319,
+ si5324,
+ si5328
+};
+
+static const char * const si53xx_variant_name[] = {
+ "si5319", "si5324", "si5328"
+};
+
+/**
+ * si5324_reg_read - Read a single si5324 register.
+ *
+ * @drvdata: Device to read from.
+ * @reg: Register to read.
+ *
+ * This function reads data from a single register
+ *
+ * Return: Data of the register on success, error number on failure
+ */
+static inline int
+si5324_reg_read(struct si5324_driver_data *drvdata, u8 reg)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(drvdata->regmap, reg, &val);
+ if (ret < 0) {
+ dev_err(&drvdata->client->dev,
+ "unable to read from reg%02x\n", reg);
+ return ret;
+ }
+
+ return (u8)val;
+}
+
+/**
+ * si5324_bulk_read - Read multiple si5324 registers
+ *
+ * @drvdata: Device to read from
+ * @reg: First register to be read from
+ * @count: Number of registers
+ * @buf: Pointer to store read value
+ *
+ * This function reads from multiple registers that are in
+ * sequential order.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_bulk_read(struct si5324_driver_data *drvdata,
+ u8 reg, u8 count, u8 *buf)
+{
+ return regmap_bulk_read(drvdata->regmap, reg, buf, count);
+}
+
+/**
+ * si5324_reg_write - Write a single si5324 register.
+ *
+ * @drvdata: Device to write to.
+ * @reg: Register to write to.
+ * @val: Value to write.
+ *
+ * This function writes into a single register
+ *
+ * Return: Zero on success, a negative error number on failure.
+ *
+ */
+static inline int si5324_reg_write(struct si5324_driver_data *drvdata,
+ u8 reg, u8 val)
+{
+ int ret = regmap_write(drvdata->regmap, reg, val);
+
+ dev_dbg(&drvdata->client->dev, "%s 0x%02x @%02d\n", __func__,
+ (int)val, (int)reg);
+ return ret;
+}
+
+/**
+ * si5324_bulk_write - Write into multiple si5324 registers
+ *
+ * @drvdata: Device to write to
+ * @reg: First register
+ * @count: Number of registers
+ * @buf: Block of data to be written
+ *
+ * This function writes into multiple registers.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_bulk_write(struct si5324_driver_data *drvdata,
+ u8 reg, u8 count, const u8 *buf)
+{
+ return regmap_raw_write(drvdata->regmap, reg, buf, count);
+}
+
+/**
+ * si5324_set_bits - Set the value of a bitfield in a si5324 register
+ *
+ * @drvdata: Device to write to.
+ * @reg: Register to write to.
+ * @mask: Mask of bits to set.
+ * @val: Value to set (unshifted)
+ *
+ * This function sets particular bits in a register.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_set_bits(struct si5324_driver_data *drvdata,
+ u8 reg, u8 mask, u8 val)
+{
+ return regmap_update_bits(drvdata->regmap, reg, mask, val);
+}
+
+/**
+ * si5324_bulk_scatter_write - Write into multiple si5324 registers
+ *
+ * @drvdata: Device to write to
+ * @count: Number of registers
+ * @buf: Array of @count register/value pairs, laid out as
+ * { reg0, val0, reg1, val1, ... }
+ *
+ * This function writes into multiple registers that need not be in
+ * sequential order.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int
+si5324_bulk_scatter_write(struct si5324_driver_data *drvdata,
+ u8 count, const u8 *buf)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ ret = si5324_reg_write(drvdata, buf[i * 2], buf[i * 2 + 1]);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+/**
+ * si5324_initialize - Initializes si5324 device
+ *
+ * @drvdata: Device instance
+ *
+ * This function resets the si5324 (reset asserted for 20 ms) and then
+ * initializes it with the following settings:
+ * 1. Free-run mode
+ * 2. Output clocks disabled during calibration
+ * 3. Clock selection mode: default value, manual
+ * 4. Output signal format: LVDS for clkout1, clkout2 disabled
+ * 5. CS_CA pin ignored
+ * 6. Lock time set to 13.3 ms
+ * 7. Fastlock enabled
+ *
+ * Return: Zero on success, negative number on failure.
+ */
+static int si5324_initialize(struct si5324_driver_data *drvdata)
+{
+ int ret = 0;
+
+ si5324_set_bits(drvdata, SI5324_RESET_CALIB,
+ SI5324_RST_ALL, SI5324_RST_ALL);
+ msleep(SI5324_RESET_DELAY_MS);
+ si5324_set_bits(drvdata, SI5324_RESET_CALIB, SI5324_RST_ALL, 0);
+ msleep(SI5324_RESET_DELAY_MS);
+
+ ret = si5324_reg_read(drvdata, SI5324_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CONTROL,
+ (ret | SI5324_CONTROL_FREE_RUN));
+
+ ret = si5324_reg_read(drvdata, SI5324_CKSEL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CKSEL, (ret | SI5324_CKSEL_SQL_ICAL));
+ si5324_reg_write(drvdata, SI3324_AUTOSEL, SI5324_AUTOSEL_DEF);
+ si5324_reg_write(drvdata, SI5324_OUTPUT_SIGFMT,
+ SI5324_OUTPUT_SF1_DEFAULT);
+
+ ret = si5324_reg_read(drvdata, SI5324_DSBL_CLKOUT);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_DSBL_CLKOUT,
+ (ret | SI5324_DSBL_CLKOUT2));
+ ret = si5324_reg_read(drvdata, SI5324_POWERDOWN);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_POWERDOWN, (ret | SI5324_PD_CK2));
+ si5324_reg_write(drvdata, SI5324_FOS_LOCKT, SI5324_FOS_DEFAULT);
+
+ ret = si5324_reg_read(drvdata, SI5324_CK_ACTV_SEL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CK_ACTV_SEL, SI5324_CK_DEFAULT);
+ ret = si5324_reg_read(drvdata, SI5324_FASTLOCK);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_FASTLOCK, (ret | SI5324_FASTLOCK_EN));
+ return 0;
+}
+
+/**
+ * si5324_read_parameters - Reads clock divider parameters
+ *
+ * @drvdata: Device to read from
+ *
+ * This function reads the clock divider parameters into driver structure.
+ *
+ * The following table gives the buffer index, register number, and
+ * register bit fields:
+ * 0 25 N1_HS[2:0]
+ * 6 31 NC1_LS[19:16]
+ * 7 32 NC1_LS[15:8]
+ * 8 33 NC1_LS[7:0]
+ * 9 34 NC2_LS[19:16]
+ * 10 35 NC2_LS[15:8]
+ * 11 36 NC2_LS[7:0]
+ * 15 40 N2_HS[2:0] N2_LS[19:16]
+ * 16 41 N2_LS[15:8]
+ * 17 42 N2_LS[7:0]
+ * 18 43 N31[18:16]
+ * 19 44 N31[15:8]
+ * 20 45 N31[7:0]
+ * 21 46 N32[18:16]
+ * 22 47 N32[15:8]
+ * 23 48 N32[7:0]
+ */
+static void si5324_read_parameters(struct si5324_driver_data *drvdata)
+{
+ u8 buf[SI5324_PARAM_LEN];
+
+ si5324_bulk_read(drvdata, SI5324_N1_HS, SI5324_N1_PARAM_LEN, &buf[0]);
+ si5324_bulk_read(drvdata, SI5324_NC1_LS_H, SI5324_NC_PARAM_LEN,
+ &buf[6]);
+ si5324_bulk_read(drvdata, SI5324_N2_HS_LS_H, SI5324_N2_PARAM_LEN,
+ &buf[15]);
+
+ drvdata->params.n1_hs = (buf[0] >> SI5324_N1_HS_VAL_SHIFT);
+ drvdata->params.n1_hs += 4;
+
+ drvdata->params.nc1_ls = ((buf[6] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[7] << SI5324_LSHIFT) |
+ buf[8];
+ drvdata->params.nc1_ls += 1;
+ drvdata->params.nc2_ls = ((buf[9] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[10] << SI5324_LSHIFT) |
+ buf[11];
+ drvdata->params.nc2_ls += 1;
+ drvdata->params.n2_ls = ((buf[15] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[16] << SI5324_LSHIFT) |
+ buf[17];
+ drvdata->params.n2_ls += 1;
+ drvdata->params.n2_hs = buf[15] >> SI5324_N2_HS_LS_H_VAL_SHIFT;
+ drvdata->params.n2_hs += 4;
+ drvdata->params.n31 = ((buf[18] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[19] << SI5324_LSHIFT) |
+ buf[20];
+ drvdata->params.n31 += 1;
+ drvdata->params.n32 = ((buf[21] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[22] << SI5324_LSHIFT) |
+ buf[23];
+ drvdata->params.n32 += 1;
+ drvdata->params.valid = 1;
+}
+
+static bool si5324_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return true;
+}
+
+/**
+ * si5324_regmap_is_readable - Check whether a register is readable
+ *
+ * @dev: Registered device
+ * @reg: Register offset
+ *
+ * Checks whether the given register is readable.
+ *
+ * Return: True if the register is readable, false otherwise.
+ */
+static bool si5324_regmap_is_readable(struct device *dev, unsigned int reg)
+{
+ if ((reg > SI5324_POWERDOWN && reg < SI5324_FOS_LOCKT) ||
+ (reg > SI5324_N1_HS && reg < SI5324_NC1_LS_H) ||
+ (reg > SI5324_NC2_LS_L && reg < SI5324_N2_HS_LS_H) ||
+ (reg > SI5324_N32_CLKIN_L && reg < SI5324_FOS_CLKIN_RATE) ||
+ (reg > SI5324_FOS_CLKIN_RATE && reg < SI5324_PLL_ACTV_CLK) ||
+ reg > SI5324_SKEW2)
+ return false;
+
+ return true;
+}
+
+/**
+ * si5324_regmap_is_writeable - Check whether a register is writable
+ *
+ * @dev: Registered device
+ * @reg: Register offset
+ *
+ * Checks whether the given register is writable.
+ *
+ * Return: True if the register is writable, false otherwise.
+ */
+static bool si5324_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ if ((reg > SI5324_POWERDOWN && reg < SI5324_FOS_LOCKT) ||
+ (reg > SI5324_N1_HS && reg < SI5324_NC1_LS_H) ||
+ (reg > SI5324_NC2_LS_L && reg < SI5324_N2_HS_LS_H) ||
+ (reg > SI5324_N32_CLKIN_L && reg < SI5324_FOS_CLKIN_RATE) ||
+ (reg > SI5324_FOS_CLKIN_RATE && reg < SI5324_PLL_ACTV_CLK) ||
+ reg > SI5324_SKEW2 ||
+ (reg >= SI5324_PLL_ACTV_CLK && reg <= SI5324_CLKIN_LOL_STATUS) ||
+ (reg >= SI5324_PARTNO_H && reg <= SI5324_PARTNO_L))
+ return false;
+
+ return true;
+}
+
+static const struct regmap_config si5324_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 144,
+ .writeable_reg = si5324_regmap_is_writeable,
+ .readable_reg = si5324_regmap_is_readable,
+ .volatile_reg = si5324_regmap_is_volatile,
+};
+
+static int si5324_xtal_prepare(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void si5324_xtal_unprepare(struct clk_hw *hw)
+{
+}
+
+static const struct clk_ops si5324_xtal_ops = {
+ .prepare = si5324_xtal_prepare,
+ .unprepare = si5324_xtal_unprepare,
+};
+
+/**
+ * si5324_clkin_prepare - Prepare the clkin
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ *
+ * This function enables the particular clk
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static int si5324_clkin_prepare(struct clk_hw *hw)
+{
+ int ret = 0;
+ struct si5324_driver_data *drvdata;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (hwdata->num == SI5324_CLKIN1) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin1);
+ ret = si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ ret = si5324_set_bits(drvdata, SI5324_POWERDOWN, SI5324_PD_CK1 |
+ SI5324_PD_CK2, SI5324_PD_CK2);
+ } else if (hwdata->num == SI5324_CLKIN2) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin2);
+ ret = si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ ret = si5324_set_bits(drvdata, SI5324_POWERDOWN, SI5324_PD_CK1 |
+ SI5324_PD_CK2, SI5324_PD_CK1);
+ }
+
+ return ret;
+}
+
+/**
+ * si5324_clkin_unprepare - Unprepare the clkin
+ *
+ * @hw: Clock hardware
+ *
+ * This function disables the particular clk.
+ */
+static void si5324_clkin_unprepare(struct clk_hw *hw)
+{
+ struct si5324_driver_data *drvdata;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (hwdata->num == SI5324_CLKIN1) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin1);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ } else if (hwdata->num == SI5324_CLKIN2) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin2);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ }
+}
+
+static unsigned long si5324_clkin_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static const struct clk_ops si5324_clkin_ops = {
+ .prepare = si5324_clkin_prepare,
+ .unprepare = si5324_clkin_unprepare,
+ .recalc_rate = si5324_clkin_recalc_rate,
+};
+
+static int si5324_pll_reparent(struct si5324_driver_data *drvdata,
+ int num, enum si5324_pll_src parent)
+{
+ if (parent == SI5324_PLL_SRC_XTAL) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN,
+ SI5324_CONTROL_FREE_RUN);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT,
+ 1 << SI5324_CKSEL_SHIFT);
+ } else if (parent == SI5324_PLL_SRC_CLKIN1) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK2);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT, 0);
+ } else if (parent == SI5324_PLL_SRC_CLKIN2) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT,
+ 1 << SI5324_CKSEL_SHIFT);
+ }
+
+ return 0;
+}
+
+static unsigned char si5324_pll_get_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+/**
+ * si5324_pll_set_parent - Set parent of clock
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @index: Parent index
+ *
+ * This function sets the parent of the clock.
+ *
+ * Return: 0 on success, negative error number on failure
+ */
+static int si5324_pll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+ enum si5324_pll_src parent;
+
+ if (index == SI5324_SRC_XTAL)
+ parent = SI5324_PLL_SRC_XTAL;
+ else if (index == SI5324_SRC_CLKIN1)
+ parent = SI5324_PLL_SRC_CLKIN1;
+ else if (index == SI5324_SRC_CLKIN2)
+ parent = SI5324_PLL_SRC_CLKIN2;
+ else
+ return -EINVAL;
+
+ return si5324_pll_reparent(hwdata->drvdata, hwdata->num, parent);
+}
+
+/**
+ * si5324_pll_recalc_rate - Recalculate clock frequency
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @parent_rate: Clock frequency of parent clock
+ *
+ * This function recalculates the clock frequency.
+ *
+ * Return: Current clock frequency
+ */
+static unsigned long si5324_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (!hwdata->drvdata->params.valid)
+ si5324_read_parameters(hwdata->drvdata);
+ WARN_ON(!hwdata->drvdata->params.valid);
+
+ rate = parent_rate * hwdata->drvdata->params.n2_ls *
+ hwdata->drvdata->params.n2_hs;
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: n2_ls = %u, n2_hs = %u, parent_rate = %lu, rate = %lu\n",
+ __func__, clk_hw_get_name(hw),
+ hwdata->drvdata->params.n2_ls, hwdata->drvdata->params.n2_hs,
+ parent_rate, (unsigned long)rate);
+
+ return rate;
+}
+
+static long si5324_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static int si5324_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static const struct clk_ops si5324_pll_ops = {
+ .set_parent = si5324_pll_set_parent,
+ .get_parent = si5324_pll_get_parent,
+ .recalc_rate = si5324_pll_recalc_rate,
+ .round_rate = si5324_pll_round_rate,
+ .set_rate = si5324_pll_set_rate,
+};
+
+static int si5324_clkout_set_drive_strength(
+ struct si5324_driver_data *drvdata, int num,
+ enum si5324_drive_strength drive)
+{
+ return 0;
+}
+
+static int si5324_clkout_prepare(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void si5324_clkout_unprepare(struct clk_hw *hw)
+{
+}
+
+static unsigned long si5324_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate;
+
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ rate = hwdata->drvdata->rate_clkout0;
+
+ return rate;
+}
+
+/**
+ * si5324_clkout_round_rate - Select the rate closest to the requested one
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @rate: Clock rate
+ * @parent_rate: Parent clock rate
+ *
+ * This function selects the rate closest to the requested one.
+ *
+ * Return: Clock rate on success, negative error number on failure
+ */
+static long si5324_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 ncn_ls, n2_ls, n3n, actual_rate;
+ u8 n1_hs, n2_hs, bwsel;
+ int ret;
+
+ ret = si5324_calcfreqsettings(SI5324_REF_CLOCK, rate, &actual_rate,
+ &n1_hs, &ncn_ls, &n2_hs, &n2_ls, &n3n,
+ &bwsel);
+ if (ret < 0)
+ return ret;
+
+ return actual_rate;
+}
+
+static int si5324_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ u32 ncn_ls, n2_ls, n3n, actual_rate;
+ u8 n1_hs, n2_hs, bwsel, buf[SI5324_OUT_REGS * 2];
+ int i, ret, rc;
+
+ ret = si5324_calcfreqsettings(SI5324_REF_CLOCK, rate, &actual_rate,
+ &n1_hs, &ncn_ls, &n2_hs, &n2_ls, &n3n,
+ &bwsel);
+ if (ret < 0)
+ return ret;
+
+ hwdata->drvdata->rate_clkout0 = rate;
+ i = 0;
+
+ /* Enable Free running mode */
+ buf[i] = SI5324_CONTROL;
+ buf[i + 1] = SI5324_FREE_RUN_EN;
+ i += 2;
+
+ /* Loop bandwidth */
+ buf[i] = SI5324_BWSEL;
+ buf[i + 1] = (bwsel << SI5324_BWSEL_SHIFT) | SI5324_BWSEL_DEF_VAL;
+ i += 2;
+
+ /* Enable reference clock 2 in free running mode */
+ buf[i] = SI5324_POWERDOWN;
+ /* Enable input clock 2, Disable input clock 1 */
+ buf[i + 1] = SI5324_PD_CK1_DIS;
+ i += 2;
+
+ /* N1_HS */
+ buf[i] = SI5324_N1_HS;
+ buf[i + 1] = n1_hs << SI5324_N1_HS_VAL_SHIFT;
+ i += 2;
+
+ /* NC1_LS */
+ buf[i] = SI5324_NC1_LS_H;
+ buf[i + 1] = (u8)((ncn_ls & 0x000F0000) >> 16);
+ buf[i + 2] = SI5324_NC1_LS_M;
+ buf[i + 3] = (u8)((ncn_ls & 0x0000FF00) >> 8);
+ buf[i + 4] = SI5324_NC1_LS_L;
+ buf[i + 5] = (u8)(ncn_ls & 0x000000FF);
+ i += 6;
+
+ /* N2_HS and N2_LS */
+ buf[i] = SI5324_N2_HS_LS_H;
+ buf[i + 1] = (n2_hs << SI5324_N2_HS_LS_H_VAL_SHIFT);
+ buf[i + 1] |= (u8)((n2_ls & 0x000F0000) >> 16);
+ buf[i + 2] = SI5324_N2_LS_H;
+ buf[i + 3] = (u8)((n2_ls & 0x0000FF00) >> 8);
+ buf[i + 4] = SI5324_N2_LS_L;
+ buf[i + 5] = (u8)(n2_ls & 0x000000FF);
+ i += 6;
+
+ /* N32 (CLKIN2 or XTAL in FREERUNNING mode) */
+ buf[i] = SI5324_N32_CLKIN_H;
+ buf[i + 2] = SI5324_N32_CLKIN_M;
+ buf[i + 4] = SI5324_N32_CLKIN_L;
+ buf[i + 1] = (u8)((n3n & 0x00070000) >> 16);
+ buf[i + 3] = (u8)((n3n & 0x0000FF00) >> 8);
+ buf[i + 5] = (u8)(n3n & 0x000000FF);
+ i += 6;
+
+ /* Start calibration */
+ buf[i] = SI5324_RESET_CALIB;
+ buf[i + 1] = SI5324_CALIB_EN;
+ i += 2;
+
+ hwdata->drvdata->params.valid = 0;
+ rc = si5324_bulk_scatter_write(hwdata->drvdata, SI5324_OUT_REGS, buf);
+
+ return rc;
+}
+
+static const struct clk_ops si5324_clkout_ops = {
+ .prepare = si5324_clkout_prepare,
+ .unprepare = si5324_clkout_unprepare,
+ .recalc_rate = si5324_clkout_recalc_rate,
+ .round_rate = si5324_clkout_round_rate,
+ .set_rate = si5324_clkout_set_rate,
+};
+
+static const struct of_device_id si5324_dt_ids[] = {
+ { .compatible = "silabs,si5319" },
+ { .compatible = "silabs,si5324" },
+ { .compatible = "silabs,si5328" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si5324_dt_ids);
+
+static int si5324_dt_parse(struct i2c_client *client)
+{
+ struct device_node *child, *np = client->dev.of_node;
+ struct si5324_platform_data *pdata;
+ struct property *prop;
+ const __be32 *p;
+ int num = 0;
+ u32 val;
+
+ if (!np)
+ return 0;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+ * property silabs,pll-source : <num src>, [<..>]
+ * allows selectively setting the pll source
+ */
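+ /*
+ * Example (editor's sketch, based on the pair format parsed below):
+ * silabs,pll-source = <0 1>; selects clkin1 as the source of pll 0.
+ */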
+ of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
+ if (num >= 1) {
+ dev_err(&client->dev,
+ "invalid pll %d on pll-source prop\n", num);
+ return -EINVAL;
+ }
+ p = of_prop_next_u32(prop, p, &val);
+ if (!p) {
+ dev_err(&client->dev,
+ "missing pll-source for pll %d\n", num);
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 0:
+ dev_dbg(&client->dev, "using xtal as parent for pll\n");
+ pdata->pll_src = SI5324_PLL_SRC_XTAL;
+ break;
+ case 1:
+ dev_dbg(&client->dev,
+ "using clkin1 as parent for pll\n");
+ pdata->pll_src = SI5324_PLL_SRC_CLKIN1;
+ break;
+ case 2:
+ dev_dbg(&client->dev,
+ "using clkin2 as parent for pll\n");
+ pdata->pll_src = SI5324_PLL_SRC_CLKIN2;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid parent %d for pll %d\n", val, num);
+ return -EINVAL;
+ }
+ }
+ /* per clkout properties */
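+ /*
+ * Example child node (editor's sketch; property values assumed):
+ * clk0 {
+ * reg = <0>;
+ * silabs,drive-strength = <8>;
+ * clock-frequency = <148500000>;
+ * };
+ */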
+ for_each_child_of_node(np, child) {
+ if (of_property_read_u32(child, "reg", &num)) {
+ dev_err(&client->dev, "missing reg property of %s\n",
+ child->name);
+ goto put_child;
+ }
+
+ if (num >= 2) {
+ dev_err(&client->dev, "invalid clkout %d\n", num);
+ goto put_child;
+ }
+
+ if (!of_property_read_u32(child, "silabs,drive-strength",
+ &val)) {
+ switch (val) {
+ case SI5324_DRIVE_2MA:
+ case SI5324_DRIVE_4MA:
+ case SI5324_DRIVE_6MA:
+ case SI5324_DRIVE_8MA:
+ pdata->clkout[num].drive = val;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid drive strength %d for clkout %d\n",
+ val, num);
+ goto put_child;
+ }
+ }
+
+ if (!of_property_read_u32(child, "clock-frequency", &val)) {
+ dev_dbg(&client->dev, "clock-frequency = %u\n", val);
+ pdata->clkout[num].rate = val;
+ } else {
+ dev_err(&client->dev,
+ "missing clock-frequency property of %s\n",
+ child->name);
+ goto put_child;
+ }
+ }
+ client->dev.platform_data = pdata;
+
+ return 0;
+put_child:
+ of_node_put(child);
+ return -EINVAL;
+}
+
+static u8 instance;
+
+static int si5324_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct si5324_platform_data *pdata;
+ struct si5324_driver_data *drvdata;
+ struct clk_init_data init;
+ struct clk *clk;
+ const char *parent_names[3];
+ char inst_names[NUM_NAME_IDS][MAX_NAME_LEN];
+ u8 num_parents, num_clocks;
+ int ret, n;
+ enum si53xx_variant variant = id->driver_data;
+
+ if (variant > si5328) {
+ dev_err(&client->dev, "si53xx device not present\n");
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "%s probed\n", si53xx_variant_name[variant]);
+ ret = si5324_dt_parse(client);
+ if (ret)
+ return ret;
+
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->client = client;
+ drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+ drvdata->pclkin1 = devm_clk_get(&client->dev, "clkin1");
+ drvdata->pclkin2 = devm_clk_get(&client->dev, "clkin2");
+
+ if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+ PTR_ERR(drvdata->pclkin1) == -EPROBE_DEFER ||
+ PTR_ERR(drvdata->pclkin2) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ drvdata->regmap = devm_regmap_init_i2c(client, &si5324_regmap_config);
+ if (IS_ERR(drvdata->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(drvdata->regmap);
+ }
+
+ i2c_set_clientdata(client, drvdata);
+ si5324_initialize(drvdata);
+
+ /* setup input clock configuration */
+ ret = si5324_pll_reparent(drvdata, 0, pdata->pll_src);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to reparent pll to %d\n",
+ pdata->pll_src);
+ return ret;
+ }
+
+ for (n = 0; n < SI5324_MAX_CLKOUTS; n++) {
+ ret = si5324_clkout_set_drive_strength(drvdata, n,
+ pdata->clkout[n].drive);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed set drive strength of clkout%d to %d\n",
+ n, pdata->clkout[n].drive);
+ return ret;
+ }
+ }
+
+ if (!IS_ERR(drvdata->pxtal))
+ clk_prepare_enable(drvdata->pxtal);
+ if (!IS_ERR(drvdata->pclkin1))
+ clk_prepare_enable(drvdata->pclkin1);
+ if (!IS_ERR(drvdata->pclkin2))
+ clk_prepare_enable(drvdata->pclkin2);
+
+ /* create instance names by appending instance id */
+ for (n = 0; n < SI5324_SRC_CLKS; n++) {
+ sprintf(inst_names[n], "%s_%d", si5324_input_names[n],
+ instance);
+ }
+ sprintf(inst_names[3], "%s_%d", si5324_pll_name, instance);
+ for (n = 0; n < SI5324_MAX_CLKOUTS; n++) {
+ sprintf(inst_names[n + 4], "%s_%d", si5324_clkout_names[n],
+ instance);
+ }
+
+ /* register xtal input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[0];
+ init.ops = &si5324_xtal_ops;
+ init.flags = 0;
+
+ if (!IS_ERR(drvdata->pxtal)) {
+ drvdata->pxtal_name = __clk_get_name(drvdata->pxtal);
+ init.parent_names = &drvdata->pxtal_name;
+ init.num_parents = 1;
+ }
+ drvdata->xtal.init = &init;
+
+ clk = devm_clk_register(&client->dev, &drvdata->xtal);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* register clkin1 input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[1];
+ init.ops = &si5324_clkin_ops;
+ if (!IS_ERR(drvdata->pclkin1)) {
+ drvdata->pclkin1_name = __clk_get_name(drvdata->pclkin1);
+ init.parent_names = &drvdata->pclkin1_name;
+ init.num_parents = 1;
+ }
+
+ drvdata->clkin1.init = &init;
+ clk = devm_clk_register(&client->dev, &drvdata->clkin1);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* register clkin2 input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[2];
+ init.ops = &si5324_clkin_ops;
+ if (!IS_ERR(drvdata->pclkin2)) {
+ drvdata->pclkin2_name = __clk_get_name(drvdata->pclkin2);
+ init.parent_names = &drvdata->pclkin2_name;
+ init.num_parents = 1;
+ }
+
+ drvdata->clkin2.init = &init;
+ clk = devm_clk_register(&client->dev, &drvdata->clkin2);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* Si5324 can mux xtal, clkin1, or clkin2 to the PLL input */
+ num_parents = SI5324_SRC_CLKS;
+ parent_names[0] = inst_names[0];
+ parent_names[1] = inst_names[1];
+ parent_names[2] = inst_names[2];
+
+ /* register PLL */
+ drvdata->pll.drvdata = drvdata;
+ drvdata->pll.hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[3];
+ init.ops = &si5324_pll_ops;
+ init.flags = 0;
+ init.flags |= CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk = devm_clk_register(&client->dev, &drvdata->pll.hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+
+ /* register clk out divider */
+ num_clocks = 2;
+ num_parents = 1;
+ parent_names[0] = inst_names[3];
+
+ drvdata->clkout = devm_kzalloc(&client->dev, num_clocks *
+ sizeof(*drvdata->clkout), GFP_KERNEL);
+
+ drvdata->onecell.clk_num = num_clocks;
+ drvdata->onecell.clks = devm_kzalloc(&client->dev,
+ num_clocks *
+ sizeof(*drvdata->onecell.clks),
+ GFP_KERNEL);
+
+ if (WARN_ON(!drvdata->clkout) || !drvdata->onecell.clks) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
+
+ for (n = 0; n < num_clocks; n++) {
+ drvdata->clkout[n].num = n;
+ drvdata->clkout[n].drvdata = drvdata;
+ drvdata->clkout[n].hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = inst_names[4 + n];
+ init.ops = &si5324_clkout_ops;
+ init.flags = 0;
+ init.flags |= CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk = devm_clk_register(&client->dev, &drvdata->clkout[n].hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ ret = PTR_ERR(clk);
+ goto err_clk;
+ }
+ /* refer to output clock in onecell */
+ drvdata->onecell.clks[n] = clk;
+
+ /* set initial clkout rate */
+ if (pdata->clkout[n].rate != 0) {
+ int ret;
+
+ ret = clk_set_rate(clk, pdata->clkout[n].rate);
+ if (ret != 0) {
+ dev_err(&client->dev, "Cannot set rate : %d\n",
+ ret);
+ }
+ }
+ }
+
+ ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+ &drvdata->onecell);
+ if (ret) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ goto err_clk;
+ }
+
+ dev_info(&client->dev, "%s probe successful\n",
+ si53xx_variant_name[variant]);
+ instance++;
+ return 0;
+
+err_clk:
+ if (!IS_ERR(drvdata->pxtal))
+ clk_disable_unprepare(drvdata->pxtal);
+ if (!IS_ERR(drvdata->pclkin1))
+ clk_disable_unprepare(drvdata->pclkin1);
+ if (!IS_ERR(drvdata->pclkin2))
+ clk_disable_unprepare(drvdata->pclkin2);
+
+ return ret;
+}
+
+static int si5324_i2c_remove(struct i2c_client *client)
+{
+ of_clk_del_provider(client->dev.of_node);
+ return 0;
+}
+
+static const struct i2c_device_id si5324_i2c_ids[] = {
+ { "si5319", si5319 },
+ { "si5324", si5324 },
+ { "si5328", si5328 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si5324_i2c_ids);
+
+static struct i2c_driver si5324_driver = {
+ .driver = {
+ .name = "si5324",
+ .of_match_table = of_match_ptr(si5324_dt_ids),
+ },
+ .probe = si5324_i2c_probe,
+ .remove = si5324_i2c_remove,
+ .id_table = si5324_i2c_ids,
+};
+module_i2c_driver(si5324_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao G <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Silicon Labs 5319/5324/5328 clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-si5324.h b/drivers/clk/clk-si5324.h
new file mode 100644
index 000000000000..48e62a67f56e
--- /dev/null
+++ b/drivers/clk/clk-si5324.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Authors: Leon Woestenberg <leon@sidebranch.com>
+ * Venkateshwar Rao <vgannava@xilinx.com>
+ */
+
+#ifndef _CLK_SI5324_H_
+#define _CLK_SI5324_H_
+
+#define SI5324_BUS_BASE_ADDR 0x68
+
+#define SI5324_CONTROL 0
+#define SI5324_CONTROL_FREE_RUN BIT(6)
+#define SI5324_FREE_RUN_EN 0x54
+
+#define SI5324_INCK_PRIOR 1
+#define SI5324_INCK_PRIOR_1_MASK 0xC
+#define SI5324_INCK_PRIOI_2_MASK 0x3
+
+#define SI5324_BWSEL 2
+#define SI5324_BWSEL_MASK 0xF0
+#define SI5324_BWSEL_SHIFT 4
+#define SI5324_BWSEL_DEF_VAL 2
+
+#define SI5324_CKSEL 3
+#define SI5324_CKSEL_SQL_ICAL BIT(4)
+#define SI5324_CKSEL_SHIFT 6
+#define SI5324_CK_SEL 3
+
+#define SI3324_AUTOSEL 4
+#define SI5324_AUTOSEL_DEF 0x12
+
+#define SI5324_ICMOS 5
+#define SI5324_OUTPUT_SIGFMT 6
+#define SI5324_OUTPUT_SF1_DEFAULT 0xF
+#define SI5324_REFFRE_FOS 7
+#define SI5324_HLOG 8
+#define SI5324_AVG_HIST 9
+#define SI5324_DSBL_CLKOUT 10
+#define SI5324_DSBL_CLKOUT2 BIT(3)
+#define SI5324_POWERDOWN 11
+#define SI5324_PD_CK1 BIT(0)
+#define SI5324_PD_CK2 BIT(1)
+#define SI5324_PD_CK1_DIS 0x41
+#define SI5324_PD_CK2_DIS 0x42
+#define SI5324_FOS_LOCKT 19
+#define SI5324_FOS_DEFAULT 0x23
+#define SI5324_CK_ACTV_SEL 21
+#define SI5324_CK_DEFAULT 0xFC
+#define SI5324_CK_ACTV BIT(1)
+#define SI5324_CK_SELPIN BIT(1)
+#define SI5324_LOS_MSK 23
+#define SI5324_FOS_L0L_MASK 24
+
+/* output clock dividers */
+#define SI5324_N1_HS 25
+#define SI5324_N1_HS_VAL_SHIFT 5
+#define SI5324_HSHIFT 16
+#define SI5324_LSHIFT 8
+#define SI5324_NC1_LS_H 31
+#define SI5324_NC1_LS_M 32
+#define SI5324_NC1_LS_L 33
+#define SI5324_DIV_LS_MASK 0x0F
+#define SI5324_DIV_HS_MASK 0xF0
+#define SI5324_NC2_LS_H 34
+#define SI5324_NC2_LS_M 35
+#define SI5324_NC2_LS_L 36
+
+#define SI5324_N2_HS_LS_H 40
+#define SI5324_N2_HS_LS_H_VAL_SHIFT 5
+#define SI5324_N2_LS_H 41
+#define SI5324_N2_LS_L 42
+#define SI5324_N31_CLKIN_H 43
+#define SI5324_N31_CLKIN_M 44
+#define SI5324_N31_CLKIN_L 45
+#define SI5324_N32_CLKIN_H 46
+#define SI5324_N32_CLKIN_M 47
+#define SI5324_N32_CLKIN_L 48
+#define SI5324_FOS_CLKIN_RATE 55
+#define SI5324_PLL_ACTV_CLK 128
+#define SI5324_LOS_STATUS 129
+#define SI5324_CLKIN_LOL_STATUS 130
+#define SI5324_LOS_FLG 131
+#define SI5324_FOS_FLG 132
+#define SI5324_PARTNO_H 134
+#define SI5324_PARTNO_L 135
+
+#define SI5324_RESET_CALIB 136
+#define SI5324_RST_ALL BIT(7)
+#define SI5324_CALIB_EN BIT(6)
+
+#define SI5324_FASTLOCK 137
+#define SI5324_FASTLOCK_EN BIT(0)
+#define SI5324_LOS1_LOS2_EN 138
+#define SI5324_SKEW1 142
+#define SI5324_SKEW2 143
+
+/* selects 2kHz to 710 MHz */
+#define SI5324_CLKIN_MIN_FREQ 2000
+#define SI5324_CLKIN_MAX_FREQ (710 * 1000 * 1000)
+
+/* generates 2kHz to 945 MHz */
+#define SI5324_CLKOUT_MIN_FREQ 2000
+#define SI5324_CLKOUT_MAX_FREQ (945 * 1000 * 1000)
+
+/* The following constants define the limits of the divider settings. */
+#define SI5324_N1_HS_MIN 6
+#define SI5324_N1_HS_MAX 11
+#define SI5324_NC_LS_MIN 1
+#define SI5324_NC_LS_MAX 0x100000
+#define SI5324_N2_HS_MIN 4
+#define SI5324_N2_HS_MAX 11
+#define SI5324_N2_LS_MIN 2
+#define SI5324_N2_LS_MAX 0x100000
+#define SI5324_N3_MIN 1
+#define SI5324_N3_MAX 0x080000
+
+#define SI5324_SRC_XTAL 0
+#define SI5324_SRC_CLKIN1 1
+#define SI5324_SRC_CLKIN2 2
+#define SI5324_SRC_CLKS 3
+
+#define SI5324_CLKIN1 0
+#define SI5324_CLKIN2 1
+#define SI5324_MAX_CLKOUTS 2
+#define NUM_NAME_IDS 6 /* 3 clkin, 1 pll, 2 clkout */
+#define MAX_NAME_LEN 11
+#define SI5324_PARAM_LEN 24
+#define SI5324_NC_PARAM_LEN 6
+#define SI5324_OUT_REGS 14
+#define SI5324_N1_PARAM_LEN 1
+#define SI5324_N2_PARAM_LEN 9
+#define SI5324_REF_CLOCK 114285000UL
+#define SI5324_RESET_DELAY_MS 20
+
+#endif
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2eb7f8cd27a8..088d4c3bda42 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -285,6 +285,34 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
+static unsigned int sibling;
+
+static void clk_show_subtree(struct clk_core *c,
+ int level)
+{
+ struct clk_core *child;
+
+ if (!c)
+ return;
+
+ if (level == 1)
+ sibling++;
+
+ hlist_for_each_entry(child, &c->children, child_node)
+ clk_show_subtree(child, level + 1);
+}
+
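+/*
+ * clk_get_children - count the direct children of the named clock.
+ *
+ * Editor's note: this walks the subtree under @name and returns the
+ * number of level-1 nodes. It relies on a file-scope counter, so it is
+ * not safe for concurrent callers.
+ */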
+unsigned int clk_get_children(char *name)
+{
+ struct clk_core *core;
+ struct clk *pclk = __clk_lookup(name);
+
+ if (!pclk)
+ return 0;
+
+ sibling = 0;
+ core = pclk->core;
+ clk_show_subtree(core, 0);
+ return sibling;
+}
+
static struct clk_core *__clk_lookup_subtree(const char *name,
struct clk_core *core)
{
diff --git a/drivers/clk/idt/Makefile b/drivers/clk/idt/Makefile
new file mode 100644
index 000000000000..4cf2b6e4801d
--- /dev/null
+++ b/drivers/clk/idt/Makefile
@@ -0,0 +1,3 @@
+obj-y += clk-idt8t49n24x-core.o
+obj-y += clk-idt8t49n24x-debugfs.o
+obj-y += clk-idt8t49n24x.o
diff --git a/drivers/clk/idt/clk-idt8t49n24x-core.c b/drivers/clk/idt/clk-idt8t49n24x-core.c
new file mode 100644
index 000000000000..ad23014e708f
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-core.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x-core.c - Program 8T49N24x settings via I2C (common code)
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include "clk-idt8t49n24x-core.h"
+
+/*
+ * When Q0 is changed from 25 MHz to 75 MHz in Timing Commander, the
+ * following changes occur:
+ *
+ * 2 bytes change in EEPROM data string.
+ *
+ * DSM_INT R0025[0],R0026[7:0] : 35 => 30
+ * NS2_Q0 R0040[7:0],R0041[7:0] : 14 => 4
+ *
+ * In EEPROM
+ * 1. R0026
+ * 2. R0041
+ *
+ * Note that VCO_Frequency (metadata) also changed (3500 => 3000).
+ * This reflects a change to DSM_INT.
+ *
+ * Note that the Timing Commander code has workarounds in the workflow scripts
+ * to handle dividers for the 8T49N241 (because the development of that GUI
+ * predates chip override functionality). That affects NS1_Qx (x in 1-3)
+ * and NS2_Qx. NS1_Qx contains the upper bits of NS_Qx, and NS2_Qx contains
+ * the lower bits. That is NOT the case for Q0, though. In that case NS1_Q0
+ * is the 1st stage output divider (/5, /6, /4) and NS2_Q0 is the 16-bit
+ * second stage (with actual divide being twice the value stored in the
+ * register).
+ *
+ * NS1_Q0 R003F[1:0]
+ */
+
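+/*
+ * Editor's worked example (sketch): with NS1_Q0 selecting /4 and an
+ * NS2_Q0 register value of 10, the effective Q0 divider is
+ * 4 * (2 * 10) = 80.
+ */
+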
+#define IDT24x_VCO_MIN 2999997000u
+#define IDT24x_VCO_MAX 4000004000u
+#define IDT24x_VCO_OPT 3500000000u
+#define IDT24x_MIN_INT_DIVIDER 6
+#define IDT24x_MIN_NS1 4
+#define IDT24x_MAX_NS1 6
+
+static u8 q0_ns1_options[3] = { 5, 6, 4 };
+
+/**
+ * bits_to_shift - num bits to shift given specified mask
+ * @mask: 32-bit word input to count zero bits on right
+ *
+ * Given a bit mask indicating where a value will be stored in
+ * a register, return the number of bits you need to shift the value
+ * before ORing it into the register value.
+ *
+ * Return: number of bits to shift
+ */
+int bits_to_shift(unsigned int mask)
+{
+ /* the number of zero bits on the right */
+ unsigned int c = 32;
+
+ mask &= ~mask + 1;
+ if (mask)
+ c--;
+ if (mask & 0x0000FFFF)
+ c -= 16;
+ if (mask & 0x00FF00FF)
+ c -= 8;
+ if (mask & 0x0F0F0F0F)
+ c -= 4;
+ if (mask & 0x33333333)
+ c -= 2;
+ if (mask & 0x55555555)
+ c -= 1;
+ return c;
+}
+
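+/*
+ * Example (editor's sketch): bits_to_shift(0x30) returns 4, so a field
+ * value of 0x2 destined for mask 0x30 is positioned as
+ * (0x2 << 4) & 0x30 == 0x20 before being ORed into the register value
+ * (see i2cwritewithmask below).
+ */
+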
+/*
+ * TODO: Consider replacing this with regmap_multi_reg_write, which
+ * supports introducing a delay after each write. Experiment to see if
+ * the writes succeed consistently when using that API.
+ */
+static int regmap_bulk_write_with_retry(
+ struct regmap *map, unsigned int offset, u8 val[],
+ int val_count, int max_attempts)
+{
+ int err = 0;
+ int count = 1;
+
+ do {
+ err = regmap_bulk_write(map, offset, val, val_count);
+ if (err == 0)
+ return 0;
+
+ usleep_range(100, 200);
+ } while (count++ <= max_attempts);
+ return err;
+}
+
+static int regmap_write_with_retry(
+ struct regmap *map, unsigned int offset, unsigned int val,
+ int max_attempts)
+{
+ int err = 0;
+ int count = 1;
+
+ do {
+ err = regmap_write(map, offset, val);
+ if (err == 0)
+ return 0;
+ usleep_range(100, 200);
+ } while (count++ <= max_attempts);
+ return err;
+}
+
+/*
+ * TODO: Consider using regmap_multi_reg_write instead. Explore
+ * use of regmap to configure WRITE_BLOCK_SIZE, and using the delay
+ * mechanism in regmap_multi_reg_write instead of retrying multiple
+ * times (regmap_bulk_write_with_retry).
+ */
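+/*
+ * Editor's note: writes longer than WRITE_BLOCK_SIZE are split into
+ * WRITE_BLOCK_SIZE-byte regmap_bulk_write calls; any remainder is
+ * flushed in a final shorter write.
+ */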
+int i2cwritebulk(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, u8 val[], size_t val_count)
+{
+ char dbg[128];
+ u8 block[WRITE_BLOCK_SIZE];
+ unsigned int block_offset = reg;
+ int x;
+ int err = 0;
+ int current_offset = 0;
+
+ dev_dbg(&client->dev, "I2C->0x%04x : [hex] . First byte: %02x, Second byte: %02x",
+ reg, reg >> 8, reg & 0xFF);
+ dbg[0] = 0;
+
+ for (x = 0; x < val_count; x++) {
+ char data[4];
+
+ block[current_offset++] = val[x];
+ sprintf(data, "%02x ", val[x]);
+ strcat(dbg, data);
+ if (x > 0 && (x + 1) % WRITE_BLOCK_SIZE == 0) {
+ dev_dbg(&client->dev, "%s", dbg);
+ dbg[0] = '\0';
+ sprintf(dbg,
+ "(loop) calling regmap_bulk_write @ 0x%04x [%d bytes]",
+ block_offset, WRITE_BLOCK_SIZE);
+ dev_dbg(&client->dev, "%s", dbg);
+ dbg[0] = '\0';
+ err = regmap_bulk_write_with_retry(
+ map, block_offset, block, WRITE_BLOCK_SIZE, 5);
+ if (err != 0)
+ break;
+ block_offset += WRITE_BLOCK_SIZE;
+ current_offset = 0;
+ }
+ }
+ if (err == 0 && current_offset > 0) {
+ dev_dbg(&client->dev, "%s", dbg);
+ dev_dbg(&client->dev, "(final) calling regmap_bulk_write @ 0x%04x [%d bytes]",
+ block_offset, current_offset);
+ err = regmap_bulk_write_with_retry(
+ map, block_offset, block, current_offset, 5);
+ }
+
+ return err;
+}
+
+static int i2cwrite(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ int err;
+
+ dev_dbg(&client->dev, "I2C->0x%x : [hex] %x", reg, val);
+ err = regmap_write_with_retry(map, reg, val, 5);
+ usleep_range(100, 200);
+ return err;
+}
+
+static int i2cwritewithmask(
+ struct i2c_client *client, struct regmap *map, unsigned int reg,
+ u8 val, u8 original, u8 mask)
+{
+ return i2cwrite(client, map, reg,
+ ((val << bits_to_shift(mask)) & mask) | (original & ~mask));
+}
+
+int idt24x_get_offsets(
+ u8 output_num,
+ struct clk_register_offsets *offsets)
+{
+ switch (output_num) {
+ case 0:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN0_MASK;
+ offsets->dis_mask = IDT24x_REG_Q0_DIS_MASK;
+ offsets->ns1_offset = IDT24x_REG_NS1_Q0;
+ offsets->ns1_offset_mask = IDT24x_REG_NS1_Q0_MASK;
+ offsets->ns2_15_8_offset = IDT24x_REG_NS2_Q0_15_8;
+ offsets->ns2_7_0_offset = IDT24x_REG_NS2_Q0_7_0;
+ break;
+ case 1:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN1_MASK;
+ offsets->dis_mask = IDT24x_REG_Q1_DIS_MASK;
+ offsets->n_17_16_offset = IDT24x_REG_N_Q1_17_16;
+ offsets->n_17_16_mask = IDT24x_REG_N_Q1_17_16_MASK;
+ offsets->n_15_8_offset = IDT24x_REG_N_Q1_15_8;
+ offsets->n_7_0_offset = IDT24x_REG_N_Q1_7_0;
+ offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q1_27_24;
+ offsets->nfrac_27_24_mask =
+ IDT24x_REG_NFRAC_Q1_27_24_MASK;
+ offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q1_23_16;
+ offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q1_15_8;
+ offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q1_7_0;
+ break;
+ case 2:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN2_MASK;
+ offsets->dis_mask = IDT24x_REG_Q2_DIS_MASK;
+ offsets->n_17_16_offset = IDT24x_REG_N_Q2_17_16;
+ offsets->n_17_16_mask = IDT24x_REG_N_Q2_17_16_MASK;
+ offsets->n_15_8_offset = IDT24x_REG_N_Q2_15_8;
+ offsets->n_7_0_offset = IDT24x_REG_N_Q2_7_0;
+ offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q2_27_24;
+ offsets->nfrac_27_24_mask =
+ IDT24x_REG_NFRAC_Q2_27_24_MASK;
+ offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q2_23_16;
+ offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q2_15_8;
+ offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q2_7_0;
+ break;
+ case 3:
+ offsets->oe_offset = IDT24x_REG_OUTEN;
+ offsets->oe_mask = IDT24x_REG_OUTEN3_MASK;
+ offsets->dis_mask = IDT24x_REG_Q3_DIS_MASK;
+ offsets->n_17_16_offset = IDT24x_REG_N_Q3_17_16;
+ offsets->n_17_16_mask = IDT24x_REG_N_Q3_17_16_MASK;
+ offsets->n_15_8_offset = IDT24x_REG_N_Q3_15_8;
+ offsets->n_7_0_offset = IDT24x_REG_N_Q3_7_0;
+ offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q3_27_24;
+ offsets->nfrac_27_24_mask =
+ IDT24x_REG_NFRAC_Q3_27_24_MASK;
+ offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q3_23_16;
+ offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q3_15_8;
+ offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q3_7_0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * idt24x_calc_div_q0 - Calculate dividers and VCO freq to generate
+ * the specified Q0 frequency.
+ * @chip: Device data structure; contains all requested frequencies
+ * for all outputs.
+ *
+ * The actual output divider is ns1 * ns2 * 2. fOutput = fVCO / (ns1 * ns2 * 2)
+ *
+ * The options for ns1 (when the source is the VCO) are 4,5,6. ns2 is a
+ * 16-bit value.
+ *
+ * chip->divs: structure for specifying ns1/ns2 values. If 0 after this
+ * function, Q0 is not requested
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_calc_div_q0(struct clk_idt24x_chip *chip)
+{
+ u8 x;
+ u32 min_div, max_div, best_vco = 0;
+ u16 min_ns2, max_ns2;
+ bool is_lower_vco = false;
+
+ chip->divs.ns1_q0 = 0;
+ chip->divs.ns2_q0 = 0;
+
+ if (chip->clk[0].requested == 0)
+ return 0;
+
+ min_div = div64_u64(
+ (u64)IDT24x_VCO_MIN, chip->clk[0].requested * 2) * 2;
+ max_div = div64_u64(
+ (u64)IDT24x_VCO_MAX, chip->clk[0].requested * 2) * 2;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. requested: %u, min_div: %u, max_div: %u",
+ __func__, chip->clk[0].requested, min_div, max_div);
+
+ min_ns2 = div64_u64((u64)min_div, IDT24x_MAX_NS1 * 2);
+ max_ns2 = div64_u64((u64)max_div, IDT24x_MIN_NS1 * 2);
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. min_ns2: %u, max_ns2: %u", __func__, min_ns2, max_ns2);
+
+ for (x = 0; x < ARRAY_SIZE(q0_ns1_options); x++) {
+ u16 y = min_ns2;
+
+ while (y <= max_ns2) {
+ u32 actual_div = q0_ns1_options[x] * y * 2;
+ u32 current_vco = actual_div *
+ chip->clk[0].requested;
+
+ if (current_vco < IDT24x_VCO_MIN)
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. ignore div: (ns1=%u * ns2=%u * 2 * %u) == %u < %u",
+ __func__, q0_ns1_options[x], y,
+ chip->clk[0].requested,
+ current_vco, IDT24x_VCO_MIN);
+ else if (current_vco > IDT24x_VCO_MAX) {
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. ignore div: (ns1=%u * ns2=%u * 2 * %u) == %u > %u. EXIT LOOP.",
+ __func__, q0_ns1_options[x], y,
+ chip->clk[0].requested,
+ current_vco, IDT24x_VCO_MAX);
+ y = max_ns2;
+ } else {
+ bool use = false;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. contender: (ns1=%u * ns2=%u * 2 * %u) == %u [in range]",
+ __func__, q0_ns1_options[x], y,
+ chip->clk[0].requested,
+ current_vco);
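+				/*
+				 * Selection policy: prefer the highest VCO
+				 * frequency at or below IDT24x_VCO_OPT; fall
+				 * back to candidates above IDT24x_VCO_OPT
+				 * only while none at or below it have been
+				 * seen.
+				 */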
+ if (current_vco <= IDT24x_VCO_OPT) {
+ if (current_vco > best_vco ||
+ !is_lower_vco) {
+ is_lower_vco = true;
+ use = true;
+ }
+ } else if (!is_lower_vco &&
+ current_vco > best_vco)
+ use = true;
+ if (use) {
+ chip->divs.ns1_q0 = x;
+ chip->divs.ns2_q0 = y;
+ best_vco = current_vco;
+ }
+ }
+ y++;
+ }
+ }
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s. best: (ns1=%u [/%u] * ns2=%u * 2 * %u) == %u",
+ __func__, chip->divs.ns1_q0, q0_ns1_options[chip->divs.ns1_q0],
+ chip->divs.ns2_q0, chip->clk[0].requested, best_vco);
+ return 0;
+}
+
+/**
+ * idt24x_calc_divs - Calculate dividers to generate the specified frequency.
+ * @chip: Device data structure. contains all requested frequencies
+ * for all outputs.
+ *
+ * Calculate the clock dividers (dsmint, dsmfrac for vco; ns1/ns2 for q0,
+ * n/nfrac for q1-3) for a given target frequency.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_calc_divs(struct clk_idt24x_chip *chip)
+{
+ u32 vco = 0;
+ int result;
+
+ result = idt24x_calc_div_q0(chip);
+ if (result < 0)
+ return result;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: after idt24x_calc_div_q0. ns1: %u [/%u], ns2: %u",
+ __func__, chip->divs.ns1_q0, q0_ns1_options[chip->divs.ns1_q0],
+ chip->divs.ns2_q0);
+
+ chip->divs.dsmint = 0;
+ chip->divs.dsmfrac = 0;
+
+ if (chip->clk[0].requested > 0) {
+ /* Q0 is in use and is governing the actual VCO freq */
+ vco = q0_ns1_options[chip->divs.ns1_q0] * chip->divs.ns2_q0 *
+ 2 * chip->clk[0].requested;
+ } else {
+ u32 freq = 0;
+ u32 walk;
+ u32 min_div, max_div;
+ bool is_lower_vco = false;
+
+ /*
+ * Q0 is not in use. Use the first requested (fractional)
+ * output frequency as the one controlling the VCO.
+ */
+ for (walk = 1; walk < NUM_OUTPUTS; walk++) {
+ if (chip->clk[walk].requested != 0) {
+ freq = chip->clk[walk].requested;
+ break;
+ }
+ }
+
+ if (freq == 0) {
+ dev_err(&chip->i2c_client->dev,
+ "%s: NO FREQUENCIES SPECIFIED", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * First, determine the min/max div for the output frequency.
+ */
+ min_div = IDT24x_MIN_INT_DIVIDER;
+ max_div = div64_u64((u64)IDT24x_VCO_MAX, freq * 2) * 2;
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: calc_divs for fractional output. freq: %u, min_div: %u, max_div: %u",
+ __func__, freq, min_div, max_div);
+
+ walk = min_div;
+
+ while (walk <= max_div) {
+ u32 current_vco = freq * walk;
+
+			dev_dbg(&chip->i2c_client->dev,
+				"%s: calc_divs for fractional output. walk: %u, freq: %u, current_vco: %u",
+				__func__, walk, freq, current_vco);
+			if (current_vco >= IDT24x_VCO_MIN &&
+			    current_vco <= IDT24x_VCO_MAX) {
+ if (current_vco <= IDT24x_VCO_OPT) {
+ if (current_vco > vco ||
+ !is_lower_vco) {
+ is_lower_vco = true;
+ vco = current_vco;
+ }
+ } else if (!is_lower_vco && current_vco > vco) {
+ vco = current_vco;
+ }
+ }
+ /* Divider must be even. */
+ walk += 2;
+ }
+ }
+
+ if (vco != 0) {
+ u32 pfd;
+ u64 rem;
+ int x;
+
+ /* Setup dividers for outputs with fractional dividers. */
+ for (x = 1; x < NUM_OUTPUTS; x++) {
+ if (chip->clk[x].requested != 0) {
+ /*
+ * The value written to the chip is half
+ * the calculated divider.
+ */
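+				/*
+				 * Illustrative example (values assumed, not
+				 * from the datasheet): with vco = 3.7 GHz
+				 * and Q1 requested at 100 MHz, nint =
+				 * 3700000000 / (2 * 100000000) = 18 with
+				 * rem = 100 MHz, and nfrac =
+				 * (100 MHz * 2^28) / 200 MHz = 2^27 =
+				 * 134217728.
+				 */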
+ chip->divs.nint[x - 1] = div64_u64_rem(
+ (u64)vco,
+ chip->clk[x].requested * 2,
+ &rem);
+ chip->divs.nfrac[x - 1] = div64_u64(
+ rem * 1 << 28,
+ chip->clk[x].requested * 2);
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: div to get Q%i freq %u from vco %u: int part: %u, rem: %llu, frac part: %u",
+ __func__, x,
+ chip->clk[x].requested,
+ vco, chip->divs.nint[x - 1], rem,
+ chip->divs.nfrac[x - 1]);
+ }
+ }
+
+ /* Calculate freq for pfd */
+ pfd = chip->input_clk_freq * (chip->doubler_disabled ? 1 : 2);
+
+ /*
+ * Calculate dsmint & dsmfrac:
+ * -----------------------------
+ * dsm = float(vco)/float(pfd)
+ * dsmfrac = dsm-floor(dsm) * 2^21
+ * rem = vco % pfd
+ * therefore:
+ * dsmfrac = (rem * 2^21)/pfd
+ */
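+		/*
+		 * Illustrative example (values assumed): vco = 3.7 GHz with
+		 * a 40 MHz input and the doubler enabled gives pfd = 80 MHz,
+		 * so dsmint = 46, rem = 20 MHz, and
+		 * dsmfrac = (20 MHz * 2^21) / 80 MHz = 524288.
+		 */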
+ chip->divs.dsmint = div64_u64_rem(vco, pfd, &rem);
+ chip->divs.dsmfrac = div64_u64(rem * 1 << 21, pfd);
+
+ dev_dbg(&chip->i2c_client->dev,
+ "%s: vco: %u, pfd: %u, dsmint: %u, dsmfrac: %u, rem: %llu",
+ __func__, vco, pfd, chip->divs.dsmint,
+ chip->divs.dsmfrac, rem);
+ } else {
+ dev_err(&chip->i2c_client->dev,
+ "%s: no integer divider in range found. NOT SUPPORTED.",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * idt24x_enable_output - Enable/disable a particular output
+ * @chip: Device data structure
+ * @output: Output to enable/disable
+ * @enable: Enable (true/false)
+ *
+ * Return: 0 on success, or the error from the underlying register write.
+ */
+static int idt24x_enable_output(
+ struct clk_idt24x_chip *chip, u8 output, bool enable)
+{
+ struct clk_register_offsets offsets;
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+
+ /*
+ * When an output is enabled, enable it in the original
+ * data read from the chip and cached. Otherwise it may be
+ * accidentally turned off when another output is enabled.
+ *
+ * E.g., the driver starts with all outputs off in reg_out_en_x.
+ * Q1 is enabled with the appropriate mask. Q2 is then enabled,
+ * which results in Q1 being turned back off (because Q1 was off
+ * in reg_out_en_x).
+ */
+
+ err = idt24x_get_offsets(output, &offsets);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_get_offsets for %d: %i",
+ __func__, output, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: q%u enable? %d. reg_out_en_x before: 0x%x, reg_out_mode_0_1 before: 0x%x, reg_out_mode_2_3 before: 0x%x, reg_qx_dis before: 0x%x",
+ __func__, output, enable, chip->reg_out_en_x,
+ chip->reg_out_mode_0_1, chip->reg_out_mode_2_3,
+ chip->reg_qx_dis);
+
+ chip->reg_out_en_x = chip->reg_out_en_x & ~offsets.oe_mask;
+ if (enable)
+ chip->reg_out_en_x |= (1 << bits_to_shift(offsets.oe_mask));
+
+ chip->reg_qx_dis = chip->reg_qx_dis & ~offsets.dis_mask;
+ dev_dbg(&client->dev,
+ "%s: q%u enable? %d. reg_qx_dis mask: 0x%x, before checking enable: 0x%x",
+ __func__, output, enable, offsets.dis_mask,
+ chip->reg_qx_dis);
+ if (!enable)
+ chip->reg_qx_dis |= (1 << bits_to_shift(offsets.dis_mask));
+
+ dev_dbg(&client->dev,
+ "%s: q%u enable? %d. reg_out_en_x after: 0x%x, reg_qx_dis after: 0x%x",
+ __func__, output, enable, chip->reg_out_en_x,
+ chip->reg_qx_dis);
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_OUTEN, chip->reg_out_en_x);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_OUTEN: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_OUTMODE0_1,
+ chip->reg_out_mode_0_1);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_OUTMODE0_1: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_OUTMODE2_3,
+ chip->reg_out_mode_2_3);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_OUTMODE2_3: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_Q_DIS, chip->reg_qx_dis);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_Q_DIS: %i",
+ __func__, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * idt24x_update_device - write registers to the chip
+ * @chip: Device data structure
+ *
+ * Write all values to hardware that we have calculated.
+ *
+ * Return: 0 on success, or the error from the underlying register write.
+ */
+static int idt24x_update_device(struct clk_idt24x_chip *chip)
+{
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+ int x = -1;
+
+ dev_dbg(&client->dev,
+		"%s: setting DSM_INT_8 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmint >> 8,
+ IDT24x_REG_DSM_INT_8);
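+	/*
+	 * Masked read-modify-write: only the bits under
+	 * IDT24x_REG_DSM_INT_8_MASK change; the remaining bits keep the
+	 * value cached in chip->reg_dsm_int_8 when it was read from the
+	 * hw at probe time.
+	 */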
+ err = i2cwritewithmask(
+ client, chip->regmap, IDT24x_REG_DSM_INT_8,
+ (chip->divs.dsmint >> 8) & IDT24x_REG_DSM_INT_8_MASK,
+ chip->reg_dsm_int_8, IDT24x_REG_DSM_INT_8_MASK);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSM_INT_8: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting DSM_INT_7_0 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmint & 0xFF,
+ IDT24x_REG_DSM_INT_7_0);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_DSM_INT_7_0,
+ chip->divs.dsmint & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSM_INT_7_0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_DSMFRAC_20_16 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmfrac >> 16,
+ IDT24x_REG_DSMFRAC_20_16);
+ err = i2cwritewithmask(
+ client, chip->regmap, IDT24x_REG_DSMFRAC_20_16,
+ (chip->divs.dsmfrac >> 16) & IDT24x_REG_DSMFRAC_20_16_MASK,
+		chip->reg_dsm_frac_20_16, IDT24x_REG_DSMFRAC_20_16_MASK);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSMFRAC_20_16: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_DSMFRAC_15_8 (val %u @ 0x%x)",
+ __func__, (chip->divs.dsmfrac >> 8) & 0xFF,
+ IDT24x_REG_DSMFRAC_15_8);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_DSMFRAC_15_8,
+ (chip->divs.dsmfrac >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSMFRAC_15_8: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_DSMFRAC_7_0 (val %u @ 0x%x)",
+ __func__, chip->divs.dsmfrac & 0xFF,
+ IDT24x_REG_DSMFRAC_7_0);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_DSMFRAC_7_0,
+ chip->divs.dsmfrac & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_DSMFRAC_7_0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_NS1_Q0 (val %u @ 0x%x)",
+ __func__, chip->divs.ns1_q0, IDT24x_REG_NS1_Q0);
+ err = i2cwritewithmask(
+ client, chip->regmap, IDT24x_REG_NS1_Q0,
+ chip->divs.ns1_q0 & IDT24x_REG_NS1_Q0_MASK,
+ chip->reg_ns1_q0, IDT24x_REG_NS1_Q0_MASK);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_NS1_Q0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_NS2_Q0_15_8 (val %u @ 0x%x)",
+ __func__, (chip->divs.ns2_q0 >> 8) & 0xFF,
+ IDT24x_REG_NS2_Q0_15_8);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_NS2_Q0_15_8,
+ (chip->divs.ns2_q0 >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_NS2_Q0_15_8: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting IDT24x_REG_NS2_Q0_7_0 (val %u @ 0x%x)",
+ __func__, chip->divs.ns2_q0 & 0xFF,
+ IDT24x_REG_NS2_Q0_7_0);
+ err = i2cwrite(
+ client, chip->regmap, IDT24x_REG_NS2_Q0_7_0,
+ chip->divs.ns2_q0 & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting IDT24x_REG_NS2_Q0_7_0: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: calling idt24x_enable_output for Q0. requestedFreq: %u",
+ __func__, chip->clk[0].requested);
+ idt24x_enable_output(chip, 0, chip->clk[0].requested != 0);
+
+ dev_dbg(&client->dev,
+ "%s: writing values for q1-q3", __func__);
+ for (x = 1; x < NUM_OUTPUTS; x++) {
+ struct clk_register_offsets offsets;
+
+ if (chip->clk[x].requested != 0) {
+ dev_dbg(&client->dev,
+ "%s: calling idt24x_get_offsets for %u",
+ __func__, x);
+ err = idt24x_get_offsets(x, &offsets);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_get_offsets: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: (q%u, nint: %u, nfrac: %u)",
+ __func__, x, chip->divs.nint[x - 1],
+ chip->divs.nfrac[x - 1]);
+
+ dev_dbg(&client->dev,
+ "%s: setting n_17_16_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ chip->divs.nint[x - 1] >> 16,
+ offsets.n_17_16_offset);
+ err = i2cwritewithmask(
+ client, chip->regmap, offsets.n_17_16_offset,
+ (chip->divs.nint[x - 1] >> 16) &
+ offsets.n_17_16_mask,
+ chip->reg_n_qx_17_16[x - 1],
+ offsets.n_17_16_mask);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting n_17_16_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting n_15_8_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nint[x - 1] >> 8) & 0xFF,
+ offsets.n_15_8_offset);
+ err = i2cwrite(
+ client, chip->regmap, offsets.n_15_8_offset,
+ (chip->divs.nint[x - 1] >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting n_15_8_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting n_7_0_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ chip->divs.nint[x - 1] & 0xFF,
+ offsets.n_7_0_offset);
+ err = i2cwrite(
+ client, chip->regmap, offsets.n_7_0_offset,
+ chip->divs.nint[x - 1] & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting n_7_0_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_27_24_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nfrac[x - 1] >> 24),
+ offsets.nfrac_27_24_offset);
+ err = i2cwritewithmask(
+ client, chip->regmap,
+ offsets.nfrac_27_24_offset,
+ (chip->divs.nfrac[x - 1] >> 24) &
+ offsets.nfrac_27_24_mask,
+ chip->reg_nfrac_qx_27_24[x - 1],
+ offsets.nfrac_27_24_mask);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_27_24_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_23_16_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nfrac[x - 1] >> 16) & 0xFF,
+ offsets.nfrac_23_16_offset);
+ err = i2cwrite(
+ client, chip->regmap,
+ offsets.nfrac_23_16_offset,
+ (chip->divs.nfrac[x - 1] >> 16) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_23_16_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_15_8_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ (chip->divs.nfrac[x - 1] >> 8) & 0xFF,
+ offsets.nfrac_15_8_offset);
+ err = i2cwrite(
+ client, chip->regmap,
+ offsets.nfrac_15_8_offset,
+ (chip->divs.nfrac[x - 1] >> 8) & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_15_8_offset: %i",
+ __func__, err);
+ return err;
+ }
+
+ dev_dbg(&client->dev,
+ "%s: setting nfrac_7_0_offset (q%u, val %u @ 0x%x)",
+ __func__, x,
+ chip->divs.nfrac[x - 1] & 0xFF,
+ offsets.nfrac_7_0_offset);
+ err = i2cwrite(
+ client, chip->regmap, offsets.nfrac_7_0_offset,
+ chip->divs.nfrac[x - 1] & 0xFF);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error setting nfrac_7_0_offset: %i",
+ __func__, err);
+ return err;
+ }
+ }
+ idt24x_enable_output(chip, x,
+ chip->clk[x].requested != 0);
+ chip->clk[x].actual = chip->clk[x].requested;
+ }
+ return 0;
+}
+
+/**
+ * idt24x_set_frequency - Adjust output frequency on the attached chip.
+ * @chip: Device data structure, including all requested frequencies.
+ *
+ * Return: 0 on success.
+ */
+int idt24x_set_frequency(struct clk_idt24x_chip *chip)
+{
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+ int x;
+ bool all_disabled = true;
+
+ for (x = 0; x < NUM_OUTPUTS; x++) {
+ if (chip->clk[x].requested == 0) {
+ idt24x_enable_output(chip, x, false);
+ chip->clk[x].actual = 0;
+ } else {
+ all_disabled = false;
+ }
+ }
+
+ if (all_disabled)
+ /*
+ * no requested frequencies, so nothing else to calculate
+ * or write to the chip. If the consumer wants to disable
+ * all outputs, they can request 0 for all frequencies.
+ */
+ return 0;
+
+ if (chip->input_clk_freq == 0) {
+ dev_err(&client->dev,
+ "%s: no input frequency; can't continue.", __func__);
+ return -EINVAL;
+ }
+
+ err = idt24x_calc_divs(chip);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_calc_divs: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = idt24x_update_device(chip);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error updating the device: %i",
+ __func__, err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/clk/idt/clk-idt8t49n24x-core.h b/drivers/clk/idt/clk-idt8t49n24x-core.h
new file mode 100644
index 000000000000..247ec070c621
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-core.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* clk-idt8t49n24x-core.h - Program 8T49N24x settings via I2C (common code)
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#ifndef __IDT_CLK_IDT8T49N24X_CORE_H_
+#define __IDT_CLK_IDT8T49N24X_CORE_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+/*
+ * The configurations in the settings file have 0x317 registers (last offset
+ * is 0x316).
+ */
+#define NUM_CONFIG_REGISTERS 0x317
+#define NUM_INPUTS 2
+#define NUM_OUTPUTS 4
+#define DEBUGFS_BUFFER_LENGTH 200
+#define WRITE_BLOCK_SIZE 32
+
+/* Non output-specific registers */
+#define IDT24x_REG_DBL_DIS 0x6C
+#define IDT24x_REG_DBL_DIS_MASK 0x01
+#define IDT24x_REG_DSM_INT_8 0x25
+#define IDT24x_REG_DSM_INT_8_MASK 0x01
+#define IDT24x_REG_DSM_INT_7_0 0x26
+#define IDT24x_REG_DSMFRAC_20_16 0x28
+#define IDT24x_REG_DSMFRAC_20_16_MASK 0x1F
+#define IDT24x_REG_DSMFRAC_15_8 0x29
+#define IDT24x_REG_DSMFRAC_7_0 0x2A
+#define IDT24x_REG_OUTEN 0x39
+#define IDT24x_REG_OUTMODE0_1 0x3E
+#define IDT24x_REG_OUTMODE2_3 0x3D
+#define IDT24x_REG_Q_DIS 0x6F
+
+/* Q0 */
+#define IDT24x_REG_OUTEN0_MASK 0x01
+#define IDT24x_REG_OUTMODE0_MASK 0x0E
+#define IDT24x_REG_Q0_DIS_MASK 0x01
+#define IDT24x_REG_NS1_Q0 0x3F
+#define IDT24x_REG_NS1_Q0_MASK 0x03
+#define IDT24x_REG_NS2_Q0_15_8 0x40
+#define IDT24x_REG_NS2_Q0_7_0 0x41
+
+/* Q1 */
+#define IDT24x_REG_OUTEN1_MASK 0x02
+#define IDT24x_REG_OUTMODE1_MASK 0xE0
+#define IDT24x_REG_Q1_DIS_MASK 0x02
+#define IDT24x_REG_N_Q1_17_16 0x42
+#define IDT24x_REG_N_Q1_17_16_MASK 0x03
+#define IDT24x_REG_N_Q1_15_8 0x43
+#define IDT24x_REG_N_Q1_7_0 0x44
+#define IDT24x_REG_NFRAC_Q1_27_24 0x57
+#define IDT24x_REG_NFRAC_Q1_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q1_23_16 0x58
+#define IDT24x_REG_NFRAC_Q1_15_8 0x59
+#define IDT24x_REG_NFRAC_Q1_7_0 0x5A
+
+/* Q2 */
+#define IDT24x_REG_OUTEN2_MASK 0x04
+#define IDT24x_REG_OUTMODE2_MASK 0x0E
+#define IDT24x_REG_Q2_DIS_MASK 0x04
+#define IDT24x_REG_N_Q2_17_16 0x45
+#define IDT24x_REG_N_Q2_17_16_MASK 0x03
+#define IDT24x_REG_N_Q2_15_8 0x46
+#define IDT24x_REG_N_Q2_7_0 0x47
+#define IDT24x_REG_NFRAC_Q2_27_24 0x5B
+#define IDT24x_REG_NFRAC_Q2_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q2_23_16 0x5C
+#define IDT24x_REG_NFRAC_Q2_15_8 0x5D
+#define IDT24x_REG_NFRAC_Q2_7_0 0x5E
+
+/* Q3 */
+#define IDT24x_REG_OUTEN3_MASK 0x08
+#define IDT24x_REG_OUTMODE3_MASK 0xE0
+#define IDT24x_REG_Q3_DIS_MASK 0x08
+#define IDT24x_REG_N_Q3_17_16 0x48
+#define IDT24x_REG_N_Q3_17_16_MASK 0x03
+#define IDT24x_REG_N_Q3_15_8 0x49
+#define IDT24x_REG_N_Q3_7_0 0x4A
+#define IDT24x_REG_NFRAC_Q3_27_24 0x5F
+#define IDT24x_REG_NFRAC_Q3_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q3_23_16 0x60
+#define IDT24x_REG_NFRAC_Q3_15_8 0x61
+#define IDT24x_REG_NFRAC_Q3_7_0 0x62
+
+/**
+ * struct idt24x_output - device output information
+ * @hw:		hw registration info for this specific output clock. This gets
+ * passed as an argument to CCF api calls (e.g., set_rate).
+ * container_of can then be used to get the reference to this
+ * struct.
+ * @chip: store a reference to the parent device structure. container_of
+ * cannot be used to get to the parent device structure from
+ * idt24x_output, because clk_idt24x_chip contains an array of
+ * output structs (for future enhancements to support devices
+ * with different numbers of output clocks).
+ * @index: identifies output on the chip; used in debug statements
+ * @requested: requested output clock frequency (in Hz)
+ * @actual: actual output clock frequency (in Hz). Will only be set after
+ * successful update of the device.
+ * @debug_freq: stores value for debugfs file. Use this instead of requested
+ * struct var because debugfs expects u64, not u32.
+ */
+struct idt24x_output {
+ struct clk_hw hw;
+ struct clk_idt24x_chip *chip;
+ u8 index;
+ u32 requested;
+ u32 actual;
+ u64 debug_freq;
+};
+
+/**
+ * struct idt24x_dividers - output dividers
+ * @dsmint: int component of feedback divider for VCO (2-stage divider)
+ * @dsmfrac: fractional component of feedback divider for VCO
+ * @ns1_q0: ns1 divider component for Q0
+ * @ns2_q0: ns2 divider component for Q0
+ * @nint: int divider component for Q1-3
+ * @nfrac: fractional divider component for Q1-3
+ */
+struct idt24x_dividers {
+ u16 dsmint;
+ u32 dsmfrac;
+
+ u8 ns1_q0;
+ u16 ns2_q0;
+
+ u32 nint[3];
+ u32 nfrac[3];
+};
+
+/**
+ * struct clk_idt24x_chip - device info for chip
+ * @regmap: register map used to perform i2c writes to the chip
+ * @i2c_client: i2c_client struct passed to probe
+ * @min_freq: min frequency for this chip
+ * @max_freq: max frequency for this chip
+ * @settings: filled in if full register map is specified in the DT
+ * @has_settings: true if settings array is valid
+ * @input_clk: ptr to input clock specified in DT
+ * @input_clk_num: which input clock was specified. 0-based. A value of
+ *			NUM_INPUTS indicates that an XTAL is used as the input.
+ * @input_clk_nb: notification support (if input clk changes)
+ * @input_clk_freq: current freq of input_clk
+ * @doubler_disabled:	whether the input doubler is disabled. This value
+ *			is read from the hw on probe (in case it is set in
+ *			@settings).
+ * @clk: array of outputs. One entry per output supported by the
+ * chip. Frequencies requested via the ccf api will be
+ * recorded in this array.
+ * @reg_dsm_int_8: record current value from hw to avoid modifying
+ * when writing register values
+ * @reg_dsm_frac_20_16: record current value
+ * @reg_out_en_x: record current value
+ * @reg_out_mode_0_1: record current value
+ * @reg_out_mode_2_3: record current value
+ * @reg_qx_dis: record current value
+ * @reg_ns1_q0: record current value
+ * @reg_n_qx_17_16: record current value
+ * @reg_nfrac_qx_27_24: record current value
+ * @divs: output divider values for all outputs
+ * @debugfs_dirroot: debugfs support
+ * @debugfs_fileaction: debugfs support
+ * @debugfs_filei2c: debugfs support
+ * @debugfs_map: debugfs support
+ * @dbg_cache: debugfs support
+ * @debugfs_fileqfreq: debugfs support
+ */
+struct clk_idt24x_chip {
+ struct regmap *regmap;
+ struct i2c_client *i2c_client;
+
+ u32 min_freq;
+ u32 max_freq;
+
+ u8 settings[NUM_CONFIG_REGISTERS];
+
+ bool has_settings;
+
+ struct clk *input_clk;
+ int input_clk_num;
+ struct notifier_block input_clk_nb;
+ u32 input_clk_freq;
+
+ bool doubler_disabled;
+
+ struct idt24x_output clk[NUM_OUTPUTS];
+
+ unsigned int reg_dsm_int_8;
+ unsigned int reg_dsm_frac_20_16;
+ unsigned int reg_out_en_x;
+ unsigned int reg_out_mode_0_1;
+ unsigned int reg_out_mode_2_3;
+ unsigned int reg_qx_dis;
+ unsigned int reg_ns1_q0;
+ unsigned int reg_n_qx_17_16[3];
+ unsigned int reg_nfrac_qx_27_24[3];
+
+ struct idt24x_dividers divs;
+
+ struct dentry *debugfs_dirroot, *debugfs_fileaction, *debugfs_filei2c,
+ *debugfs_map;
+ char dbg_cache[DEBUGFS_BUFFER_LENGTH];
+ struct dentry *debugfs_fileqfreq[4];
+};
+
+#define to_idt24x_output(_hw) \
+ container_of(_hw, struct idt24x_output, hw)
+#define to_clk_idt24x_from_client(_client) \
+ container_of(_client, struct clk_idt24x_chip, i2c_client)
+#define to_clk_idt24x_from_nb(_nb) \
+ container_of(_nb, struct clk_idt24x_chip, input_clk_nb)
+
+/**
+ * struct clk_register_offsets - register offsets for current context
+ * @oe_offset: offset for current output enable and mode
+ * @oe_mask: mask for current output enable
+ * @dis_mask: mask for current output disable
+ * @n_17_16_offset: offset for current output int divider (bits 17:16)
+ * @n_17_16_mask: mask for current output int divider (bits 17:16)
+ * @n_15_8_offset: offset for current output int divider (bits 15:8)
+ * @n_7_0_offset: offset for current output int divider (bits 7:0)
+ * @nfrac_27_24_offset: offset for current output frac divider (bits 27:24)
+ * @nfrac_27_24_mask: mask for current output frac divider (bits 27:24)
+ * @nfrac_23_16_offset: offset for current output frac divider (bits 23:16)
+ * @nfrac_15_8_offset: offset for current output frac divider (bits 15:8)
+ * @nfrac_7_0_offset: offset for current output frac divider (bits 7:0)
+ * @ns1_offset: offset for stage 1 div for output Q0
+ * @ns1_offset_mask: mask for stage 1 div for output Q0
+ * @ns2_15_8_offset: offset for stage 2 div for output Q0 (bits 15:8)
+ * @ns2_7_0_offset: offset for stage 2 div for output Q0 (bits 7:0)
+ */
+struct clk_register_offsets {
+ u16 oe_offset;
+ u8 oe_mask;
+ u8 dis_mask;
+
+ u16 n_17_16_offset;
+ u8 n_17_16_mask;
+ u16 n_15_8_offset;
+ u16 n_7_0_offset;
+ u16 nfrac_27_24_offset;
+ u8 nfrac_27_24_mask;
+ u16 nfrac_23_16_offset;
+ u16 nfrac_15_8_offset;
+ u16 nfrac_7_0_offset;
+
+ u16 ns1_offset;
+ u8 ns1_offset_mask;
+ u16 ns2_15_8_offset;
+ u16 ns2_7_0_offset;
+};
+
+int bits_to_shift(unsigned int mask);
+int i2cwritebulk(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, u8 val[], size_t val_count);
+int idt24x_get_offsets(
+ u8 output_num,
+ struct clk_register_offsets *offsets);
+int idt24x_set_frequency(struct clk_idt24x_chip *chip);
+
+#endif /* __IDT_CLK_IDT8T49N24X_CORE_H_ */
diff --git a/drivers/clk/idt/clk-idt8t49n24x-debugfs.c b/drivers/clk/idt/clk-idt8t49n24x-debugfs.c
new file mode 100644
index 000000000000..967a9df8701c
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-debugfs.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x-debugfs.c - Debugfs support for 8T49N24x
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "clk-idt8t49n24x-debugfs.h"
+
+static struct clk_idt24x_chip *idt24x_chip_fordebugfs;
+
+static int idt24x_read_all_settings(
+ struct clk_idt24x_chip *chip, char *output_buffer, int count)
+{
+ u8 settings[NUM_CONFIG_REGISTERS];
+ int err = 0;
+ int x;
+
+ err = regmap_bulk_read(
+ chip->regmap, 0x0, settings, NUM_CONFIG_REGISTERS);
+ if (!err) {
+ output_buffer[0] = '\0';
+ for (x = 0; x < ARRAY_SIZE(settings); x++) {
+ char dbg[4];
+
+ if ((strlen(output_buffer) + 4) > count)
+ return -EINVAL;
+ sprintf(dbg, "%02x ", settings[x]);
+ strcat(output_buffer, dbg);
+ }
+ }
+ return err;
+}
+
+/**
+ * idt24x_debugfs_writer_action - Write handler for the "action" debugfs file.
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Return: result of call to simple_write_to_buffer
+ *
+ * Use the "action" file as a trigger for setting all requested
+ * rates. The driver doesn't get any notification when the files
+ * representing the Qx outputs are written to, so something else is
+ * needed to notify the driver that the device should be updated.
+ *
+ * It doesn't matter what you write to the action debugfs file. When the
+ * handler is called, the device will be updated.
+ */
+static ssize_t idt24x_debugfs_writer_action(
+ struct file *fp, const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ int err = 0;
+ int x;
+ u32 freq;
+ bool needs_update = true;
+ struct i2c_client *client = idt24x_chip_fordebugfs->i2c_client;
+
+ if (count > DEBUGFS_BUFFER_LENGTH)
+ return -EINVAL;
+
+ for (x = 0; x < NUM_OUTPUTS; x++) {
+ freq = idt24x_chip_fordebugfs->clk[x].debug_freq;
+ if (freq) {
+ needs_update = false;
+ dev_dbg(&client->dev,
+ "%s: calling clk_set_rate with debug frequency for Q%i",
+ __func__, x);
+ err = clk_set_rate(
+ idt24x_chip_fordebugfs->clk[x].hw.clk, freq);
+ if (err) {
+ dev_err(&client->dev,
+ "error calling clk_set_rate for Q%i (%i)\n",
+ x, err);
+ }
+ } else {
+ needs_update = true;
+ idt24x_chip_fordebugfs->clk[x].requested = 0;
+ dev_dbg(&client->dev,
+ "%s: debug frequency for Q%i not set; make sure clock is disabled",
+ __func__, x);
+ }
+ }
+
+ if (needs_update) {
+ dev_dbg(&client->dev,
+ "%s: calling idt24x_set_frequency to ensure any clocks that should be disabled are turned off.",
+ __func__);
+ err = idt24x_set_frequency(idt24x_chip_fordebugfs);
+ if (err) {
+ dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "%s: error calling idt24x_set_frequency (%i)\n",
+ __func__, err);
+ return err;
+ }
+ }
+
+ return simple_write_to_buffer(
+ idt24x_chip_fordebugfs->dbg_cache, DEBUGFS_BUFFER_LENGTH,
+ position, user_buffer, count);
+}
+
+/**
+ * idt24x_debugfs_reader_action - Read the "action" debugfs file.
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Return: whatever was last written to the "action" debugfs file.
+ */
+static ssize_t idt24x_debugfs_reader_action(
+ struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+ return simple_read_from_buffer(
+ user_buffer, count, position, idt24x_chip_fordebugfs->dbg_cache,
+ DEBUGFS_BUFFER_LENGTH);
+}
+
+/**
+ * idt24x_debugfs_reader_map - display the current registers on the device
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Reads the current register map from the attached chip via I2C and
+ * returns it.
+ *
+ * Return: result of call to simple_read_from_buffer
+ */
+static ssize_t idt24x_debugfs_reader_map(
+ struct file *fp, char __user *user_buffer, size_t count,
+ loff_t *position)
+{
+	int err = 0;
+	char *buf = kzalloc(5000, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+		"calling idt24x_read_all_settings (count: %zu)\n", count);
+	err = idt24x_read_all_settings(idt24x_chip_fordebugfs, buf, 5000);
+	if (err) {
+		dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+			"error calling idt24x_read_all_settings (%i)\n", err);
+		kfree(buf);
+		return err;
+	}
+ /* TMGCDR-1456. We're returning 1 byte too few. */
+ err = simple_read_from_buffer(
+ user_buffer, count, position, buf, strlen(buf));
+ kfree(buf);
+ return err;
+}
+
+/**
+ * idt24x_handle_i2c_debug_token - process "token" written to the i2c file
+ * @dev: pointer to device structure
+ * @token: pointer to current char being examined
+ * @reg: pass in current register, or return register from token.
+ * @val: resulting array of bytes being parsed
+ * @nextbyte: position in val array to store next byte
+ *
+ * Utility function to operate on the current "token" (from within a
+ * space-delimited string) written to the i2c debugfs file. It will
+ * either be a register offset or a byte to be added to the val array.
+ * If it is added to the val array, auto-increment nextbyte.
+ *
+ * Return: 0 for success
+ */
+static int idt24x_handle_i2c_debug_token(
+ const struct device *dev, char *token, unsigned int *reg,
+ u8 val[], u16 *nextbyte)
+{
+ int err = 0;
+
+ dev_dbg(dev, "got token (%s)\n", token);
+ if (*reg == -1) {
+ err = kstrtouint(token, 16, reg);
+ if (!err)
+ dev_dbg(dev, "hex register address == 0x%x\n", *reg);
+ } else {
+ u8 temp;
+
+ err = kstrtou8(token, 16, &temp);
+ if (!err) {
+ dev_dbg(dev, "data byte == 0x%x\n", temp);
+ val[*nextbyte] = temp;
+ *nextbyte += 1;
+ }
+ }
+ if (err == -ERANGE)
+ dev_err(dev, "ERANGE error when parsing data\n");
+ else if (err == -EINVAL)
+ dev_err(dev, "EINVAL error when parsing data\n");
+ else if (err)
+ dev_err(dev, "error when parsing data: %i\n", err);
+ return err;
+}
+
+/**
+ * idt24x_debugfs_writer_i2c - debugfs handler for i2c file
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Handler for the "i2c" debugfs file. Write to this file to write bytes
+ * via I2C to a particular offset.
+ *
+ * Usage: echo 006c 01 02 0D FF > i2c
+ *
+ * First 4 chars are the 2-byte i2c register offset. Then follow that
+ * with a sequence of 2-char bytes in hex format that you want to write
+ * starting at that offset.
+ *
+ * Return: result of simple_write_to_buffer
+ */
+static ssize_t idt24x_debugfs_writer_i2c(struct file *fp,
+ const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ int err = 0;
+ int x = 0;
+ int start = 0;
+ ssize_t written;
+ unsigned int reg = -1;
+ u8 val[WRITE_BLOCK_SIZE];
+ u16 nextbyte = 0;
+ char token[16];
+
+ if (count > DEBUGFS_BUFFER_LENGTH)
+ return -EINVAL;
+
+ written = simple_write_to_buffer(
+ idt24x_chip_fordebugfs->dbg_cache, DEBUGFS_BUFFER_LENGTH,
+ position, user_buffer, count);
+ if (written != count) {
+ dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "write count != expected count");
+ return written;
+ }
+
+ for (x = 0; x < count; x++) {
+ token[x - start] = idt24x_chip_fordebugfs->dbg_cache[x];
+ if (idt24x_chip_fordebugfs->dbg_cache[x] == ' ') {
+ token[x - start] = '\0';
+ err = idt24x_handle_i2c_debug_token(
+ &idt24x_chip_fordebugfs->i2c_client->dev,
+ token, &reg, val, &nextbyte);
+ if (err)
+ break;
+ start = x + 1;
+ }
+ }
+
+ /* handle the last token */
+ if (!err) {
+ token[count - start] = '\0';
+ err = idt24x_handle_i2c_debug_token(
+ &idt24x_chip_fordebugfs->i2c_client->dev, token, &reg,
+ val, &nextbyte);
+ }
+
+ if (!err && reg != -1 && nextbyte > 0) {
+ err = i2cwritebulk(
+ idt24x_chip_fordebugfs->i2c_client,
+ idt24x_chip_fordebugfs->regmap,
+ reg, val, nextbyte);
+ if (err) {
+ dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "error writing data chip (%i)\n", err);
+ return err;
+ }
+ dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+ "successfully wrote i2c data to chip");
+ }
+
+ return written;
+}
+
+static const struct file_operations idt24x_fops_debug_action = {
+ .read = idt24x_debugfs_reader_action,
+ .write = idt24x_debugfs_writer_action,
+};
+
+static const struct file_operations idt24x_fops_debug_map = {
+ .read = idt24x_debugfs_reader_map
+};
+
+static const struct file_operations idt24x_fops_debug_i2c = {
+ .write = idt24x_debugfs_writer_i2c,
+};
+
+/**
+ * idt24x_expose_via_debugfs - Set up all debugfs files
+ * @client: pointer to i2c_client structure
+ * @chip: Device data structure
+ *
+ * Sets up all debugfs files to use for debugging the driver.
+ * Return: error code. 0 if success or debugfs doesn't appear to be enabled.
+ */
+int idt24x_expose_via_debugfs(struct i2c_client *client,
+ struct clk_idt24x_chip *chip)
+{
+ int output_num;
+
+ /*
+	 * create the driver's root directory in debugfs (typically
+	 * mounted at /sys/kernel/debug)
+ */
+ chip->debugfs_dirroot = debugfs_create_dir("idt24x", NULL);
+ if (!chip->debugfs_dirroot) {
+ /* debugfs probably not enabled. Don't fail the probe. */
+ return 0;
+ }
+
+ /*
+ * create files in the root directory. This requires read and
+ * write file operations
+ */
+ chip->debugfs_fileaction = debugfs_create_file(
+ "action", 0644, chip->debugfs_dirroot, NULL,
+ &idt24x_fops_debug_action);
+ if (!chip->debugfs_fileaction) {
+ dev_err(&client->dev,
+ "%s: error creating action file", __func__);
+ return (-ENODEV);
+ }
+
+ chip->debugfs_map = debugfs_create_file(
+ "map", 0444, chip->debugfs_dirroot, NULL,
+ &idt24x_fops_debug_map);
+ if (!chip->debugfs_map) {
+ dev_err(&client->dev,
+ "%s: error creating map file", __func__);
+ return (-ENODEV);
+ }
+
+ for (output_num = 0; output_num < NUM_OUTPUTS; output_num++) {
+ char name[5];
+
+ sprintf(name, "q%d", output_num);
+ chip->debugfs_fileqfreq[output_num] = debugfs_create_u64(
+ name, 0644, chip->debugfs_dirroot,
+ &chip->clk[output_num].debug_freq);
+ if (!chip->debugfs_fileqfreq[output_num]) {
+ dev_err(&client->dev,
+ "%s: error creating %s debugfs file",
+ __func__, name);
+ return (-ENODEV);
+ }
+ }
+
+ chip->debugfs_filei2c = debugfs_create_file(
+ "i2c", 0644, chip->debugfs_dirroot, NULL,
+ &idt24x_fops_debug_i2c);
+ if (!chip->debugfs_filei2c) {
+ dev_err(&client->dev,
+ "%s: error creating i2c file", __func__);
+ return (-ENODEV);
+ }
+
+ dev_dbg(&client->dev, "%s: success", __func__);
+ idt24x_chip_fordebugfs = chip;
+ return 0;
+}
+
+void idt24x_cleanup_debugfs(struct clk_idt24x_chip *chip)
+{
+ debugfs_remove_recursive(chip->debugfs_dirroot);
+}
diff --git a/drivers/clk/idt/clk-idt8t49n24x-debugfs.h b/drivers/clk/idt/clk-idt8t49n24x-debugfs.h
new file mode 100644
index 000000000000..673016c8e747
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* clk-idt8t49n24x-debugfs.h - Debugfs support for 8T49N24x
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#ifndef __IDT_CLK_IDT8T49N24X_DEBUGFS_H_
+#define __IDT_CLK_IDT8T49N24X_DEBUGFS_H_
+
+#include "clk-idt8t49n24x-core.h"
+
+int idt24x_expose_via_debugfs(struct i2c_client *client,
+ struct clk_idt24x_chip *chip);
+void idt24x_cleanup_debugfs(struct clk_idt24x_chip *chip);
+
+#endif /* __IDT_CLK_IDT8T49N24X_DEBUGFS_H_*/
diff --git a/drivers/clk/idt/clk-idt8t49n24x.c b/drivers/clk/idt/clk-idt8t49n24x.c
new file mode 100644
index 000000000000..79289a5a1934
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x.c - Program 8T49N24x settings via I2C.
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "clk-idt8t49n24x-core.h"
+#include "clk-idt8t49n24x-debugfs.h"
+
+#define OUTPUTMODE_HIGHZ 0
+#define OUTPUTMODE_LVDS 2
+#define IDT24x_MIN_FREQ 1000000L
+#define IDT24x_MAX_FREQ 300000000L
+#define DRV_NAME "idt8t49n24x"
+
+enum clk_idt24x_variant {
+ idt24x
+};
+
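+/*
+ * Extract a bit field from value. Example (illustrative):
+ * mask_and_shift(0x2c, 0x0c) returns 3, the value of bits 3:2 of 0x2c.
+ */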
+static u32 mask_and_shift(u32 value, u8 mask)
+{
+ value &= mask;
+ return value >> bits_to_shift(mask);
+}
+
+/**
+ * idt24x_set_output_mode - Set the mode for a particular clock
+ * output in the register.
+ * @reg: The current register value before setting the mode.
+ * @mask: The bitmask identifying where in the register the
+ * output mode is stored.
+ * @mode:	The requested output mode. The current implementation only
+ *		switches an output configured as high-impedance to LVDS;
+ *		other modes are left unchanged.
+ *
+ * Return: the new register value.
+ */
+static u32 idt24x_set_output_mode(u32 reg, u8 mask, u8 mode)
+{
+ if (((reg & mask) >> bits_to_shift(mask)) == OUTPUTMODE_HIGHZ) {
+ reg = reg & ~mask;
+ reg |= (OUTPUTMODE_LVDS << bits_to_shift(mask));
+ }
+ return reg;
+}
+
+/**
+ * idt24x_read_from_hw - Get the current values on the hw
+ * @chip: Device data structure
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_read_from_hw(struct clk_idt24x_chip *chip)
+{
+ int err;
+ struct i2c_client *client = chip->i2c_client;
+ u32 tmp, tmp2;
+ u8 output;
+
+ err = regmap_read(chip->regmap, IDT24x_REG_DSM_INT_8,
+ &chip->reg_dsm_int_8);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_DSM_INT_8: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_dsm_int_8: 0x%x",
+ __func__, chip->reg_dsm_int_8);
+
+	err = regmap_read(chip->regmap, IDT24x_REG_DSMFRAC_20_16,
+			  &chip->reg_dsm_frac_20_16);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_DSMFRAC_20_16: %i",
+			__func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_dsm_frac_20_16: 0x%x",
+ __func__, chip->reg_dsm_frac_20_16);
+
+ err = regmap_read(chip->regmap, IDT24x_REG_OUTEN, &chip->reg_out_en_x);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_OUTEN: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_out_en_x: 0x%x",
+ __func__, chip->reg_out_en_x);
+
+ err = regmap_read(chip->regmap, IDT24x_REG_OUTMODE0_1, &tmp);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_OUTMODE0_1: %i",
+ __func__, err);
+ return err;
+ }
+
+ tmp2 = idt24x_set_output_mode(
+ tmp, IDT24x_REG_OUTMODE0_MASK, OUTPUTMODE_LVDS);
+ tmp2 = idt24x_set_output_mode(
+ tmp2, IDT24x_REG_OUTMODE1_MASK, OUTPUTMODE_LVDS);
+ dev_dbg(&client->dev,
+ "%s: reg_out_mode_0_1 original: 0x%x. After setting OUT0/1 to LVDS if necessary: 0x%x",
+ __func__, tmp, tmp2);
+ chip->reg_out_mode_0_1 = tmp2;
+
+ err = regmap_read(chip->regmap, IDT24x_REG_OUTMODE2_3, &tmp);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_OUTMODE2_3: %i",
+ __func__, err);
+ return err;
+ }
+
+ tmp2 = idt24x_set_output_mode(
+ tmp, IDT24x_REG_OUTMODE2_MASK, OUTPUTMODE_LVDS);
+ tmp2 = idt24x_set_output_mode(
+ tmp2, IDT24x_REG_OUTMODE3_MASK, OUTPUTMODE_LVDS);
+ dev_dbg(&client->dev,
+ "%s: reg_out_mode_2_3 original: 0x%x. After setting OUT2/3 to LVDS if necessary: 0x%x",
+ __func__, tmp, tmp2);
+ chip->reg_out_mode_2_3 = tmp2;
+
+ err = regmap_read(chip->regmap, IDT24x_REG_Q_DIS, &chip->reg_qx_dis);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_Q_DIS: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_qx_dis: 0x%x",
+ __func__, chip->reg_qx_dis);
+
+ err = regmap_read(chip->regmap, IDT24x_REG_NS1_Q0, &chip->reg_ns1_q0);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_NS1_Q0: %i",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(&client->dev, "%s: reg_ns1_q0: 0x%x",
+ __func__, chip->reg_ns1_q0);
+
+ for (output = 1; output <= 3; output++) {
+ struct clk_register_offsets offsets;
+
+ err = idt24x_get_offsets(output, &offsets);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error calling idt24x_get_offsets: %i",
+ __func__, err);
+ return err;
+ }
+
+ err = regmap_read(chip->regmap, offsets.n_17_16_offset,
+ &chip->reg_n_qx_17_16[output - 1]);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading n_17_16_offset for output %d (offset: 0x%x): %i",
+ __func__, output, offsets.n_17_16_offset, err);
+ return err;
+ }
+ dev_dbg(&client->dev,
+ "%s: reg_n_qx_17_16[Q%u]: 0x%x",
+ __func__, output, chip->reg_n_qx_17_16[output - 1]);
+
+ err = regmap_read(chip->regmap, offsets.nfrac_27_24_offset,
+ &chip->reg_nfrac_qx_27_24[output - 1]);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading nfrac_27_24_offset for output %d (offset: 0x%x): %i",
+ __func__, output,
+ offsets.nfrac_27_24_offset, err);
+ return err;
+ }
+ dev_dbg(&client->dev,
+ "%s: reg_nfrac_qx_27_24[Q%u]: 0x%x",
+ __func__, output,
+ chip->reg_nfrac_qx_27_24[output - 1]);
+ }
+
+ dev_info(&client->dev,
+ "%s: initial values read from chip successfully",
+ __func__);
+
+ /* Also read DBL_DIS to determine whether the doubler is disabled. */
+ err = regmap_read(chip->regmap, IDT24x_REG_DBL_DIS, &tmp);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: error reading IDT24x_REG_DBL_DIS: %i",
+ __func__, err);
+ return err;
+ }
+ chip->doubler_disabled = mask_and_shift(tmp, IDT24x_REG_DBL_DIS_MASK);
+ dev_dbg(&client->dev, "%s: doubler_disabled: %d",
+ __func__, chip->doubler_disabled);
+
+ return 0;
+}
+
+/**
+ * idt24x_set_rate - Sets the specified output clock to the specified rate.
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @rate: the rate (in Hz) for the specified clock.
+ * @parent_rate: the rate of the parent clock (e.g., the VCO feeding
+ *		the output); not used by this driver
+ *
+ * This function calls idt24x_set_frequency, which recalculates the
+ * dividers for all requested outputs and updates the attached device
+ * (issues I2C commands to update the registers).
+ *
+ * Return: 0 on success.
+ */
+static int idt24x_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+
+ /*
+ * hw->clk is the pointer to the specific output clock the user is
+ * requesting. We use hw to get back to the output structure for
+ * the output clock. Set the requested rate in the output structure.
+ * Note that container_of cannot be used to find the device structure
+ * (clk_idt24x_chip) from clk_hw, because clk_idt24x_chip has an array
+ * of idt24x_output structs. That is why it is necessary to use
+ * output->chip to access the device structure.
+ */
+ struct idt24x_output *output = to_idt24x_output(hw);
+ struct i2c_client *client = output->chip->i2c_client;
+
+ if (rate < output->chip->min_freq || rate > output->chip->max_freq) {
+ dev_err(&client->dev,
+ "requested frequency (%luHz) is out of range\n", rate);
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requested frequency in the output data structure, and then
+ * call idt24x_set_frequency. idt24x_set_frequency considers all
+ * requested frequencies when deciding on a vco frequency and
+ * calculating dividers.
+ */
+ output->requested = rate;
+
+ /*
+ * Also set in the memory location used by the debugfs file
+ * that exposes the output clock frequency. That allows querying
+ * the current rate via debugfs.
+ */
+ output->debug_freq = rate;
+
+ dev_info(&client->dev,
+ "%s. calling idt24x_set_frequency for Q%u. rate: %lu",
+ __func__, output->index, rate);
+ err = idt24x_set_frequency(output->chip);
+
+ if (err != 0)
+ dev_err(&client->dev, "error calling set_frequency: %d", err);
+
+ return err;
+}
+
+/**
+ * idt24x_round_rate - get valid rate that is closest to the requested rate
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @rate: the rate (in Hz) for the specified clock.
+ * @parent_rate: the rate of the parent clock (e.g., the VCO feeding
+ *		the output). This is an in/out parameter: a driver that
+ *		supports a parent clock for the output would set it here to
+ *		the parent rate (e.g., the VCO frequency) required if the
+ *		rounded rate is used.
+ *
+ * Returns the closest rate to the requested rate actually supported by the
+ * chip.
+ *
+ * Return: adjusted rate
+ */
+static long idt24x_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * The chip has fractional output dividers, so assume it
+ * can provide the requested rate.
+ *
+ * TODO: figure out the closest rate that chip can support
+ * within a low error threshold and return that rate.
+ */
+ return rate;
+}
+
+/**
+ * idt24x_recalc_rate - return the frequency being provided by the clock.
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @parent_rate: the rate of the parent clock (e.g., the VCO feeding
+ *		the output); not used by this driver
+ *
+ * This API appears to be used to read the current values from the hardware
+ * and report the frequency being provided by the clock. Without this function,
+ * the clock will be initialized to 0 by default. The OS appears to be
+ * calling this to find out what the current value of the clock is at
+ * startup, so it can determine when .set_rate is actually changing the
+ * frequency.
+ *
+ * Return: the frequency of the specified clock.
+ */
+static unsigned long idt24x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct idt24x_output *output = to_idt24x_output(hw);
+
+ return output->requested;
+}
+
+/*
+ * Note that .prepare and .unprepare are used more by gate clocks.
+ * They do not appear to be necessary for this device.
+ * Instead, update the device when .set_rate is called.
+ */
+static const struct clk_ops idt24x_clk_ops = {
+ .recalc_rate = idt24x_recalc_rate,
+ .round_rate = idt24x_round_rate,
+ .set_rate = idt24x_set_rate,
+};
+
+static bool idt24x_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static bool idt24x_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ return true;
+}
+
+static const struct regmap_config idt24x_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0xff,
+ .writeable_reg = idt24x_regmap_is_writeable,
+ .volatile_reg = idt24x_regmap_is_volatile,
+};
+
+/**
+ * idt24x_clk_notifier_cb - Clock rate change callback
+ * @nb: Pointer to notifier block
+ * @event: Notification reason
+ * @data: Pointer to notification data object
+ *
+ * This function is called when the input clock frequency changes.
+ * The callback checks whether a valid bus frequency can be generated after the
+ * change. If so, the change is acknowledged, otherwise the change is aborted.
+ * New dividers are written to the HW in the pre- or post change notification
+ * depending on the scaling direction.
+ *
+ * Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
+ * to acknowledge the change, NOTIFY_DONE if the notification is
+ * considered irrelevant.
+ */
+static int idt24x_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct clk_idt24x_chip *chip = to_clk_idt24x_from_nb(nb);
+ int err = 0;
+
+ dev_info(&chip->i2c_client->dev,
+ "%s: input frequency changed: %lu Hz. event: %lu",
+ __func__, ndata->new_rate, event);
+
+ switch (event) {
+ case PRE_RATE_CHANGE: {
+ dev_dbg(&chip->i2c_client->dev, "PRE_RATE_CHANGE\n");
+ return NOTIFY_OK;
+ }
+ case POST_RATE_CHANGE:
+ chip->input_clk_freq = ndata->new_rate;
+ /*
+ * Can't call clock API clk_set_rate here; I believe
+ * it will be ignored if the rate is the same as we
+ * set previously. Need to call our internal function.
+ */
+ dev_dbg(&chip->i2c_client->dev,
+ "POST_RATE_CHANGE. Calling idt24x_set_frequency\n");
+ err = idt24x_set_frequency(chip);
+ if (err)
+ dev_err(&chip->i2c_client->dev,
+ "error calling idt24x_set_frequency (%i)\n",
+ err);
+ return NOTIFY_OK;
+ case ABORT_RATE_CHANGE:
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct clk_hw *of_clk_idt24x_get(
+ struct of_phandle_args *clkspec, void *_data)
+{
+ struct clk_idt24x_chip *chip = _data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(chip->clk)) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &chip->clk[idx].hw;
+}
+
+/**
+ * idt24x_probe - main entry point for ccf driver
+ * @client: pointer to i2c_client structure
+ * @id: pointer to i2c_device_id structure
+ *
+ * Main entry point function that gets called to initialize the driver.
+ *
+ * Return: 0 for success.
+ */
+static int idt24x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct clk_idt24x_chip *chip;
+ struct clk_init_data init;
+
+ int err = 0;
+ int x;
+ char buf[6];
+
+ dev_info(&client->dev, "%s", __func__);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ init.ops = &idt24x_clk_ops;
+ init.flags = 0;
+ init.num_parents = 0;
+ chip->i2c_client = client;
+
+ chip->min_freq = IDT24x_MIN_FREQ;
+ chip->max_freq = IDT24x_MAX_FREQ;
+
+ for (x = 0; x < NUM_INPUTS + 1; x++) {
+ char name[12];
+
+ sprintf(name, x == NUM_INPUTS ? "input-xtal" : "input-clk%i",
+ x);
+ dev_dbg(&client->dev, "attempting to get %s", name);
+ chip->input_clk = devm_clk_get(&client->dev, name);
+ if (IS_ERR(chip->input_clk)) {
+ err = PTR_ERR(chip->input_clk);
+ /*
+ * TODO: Handle EPROBE_DEFER error, which indicates
+ * that the input_clk isn't available now but may be
+ * later when the appropriate module is loaded.
+ */
+ } else {
+ err = 0;
+ chip->input_clk_num = x;
+ break;
+ }
+ }
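+	/*
+	 * Illustrative DT fragment matched by the loop above (the names
+	 * come from the sprintf format; see the binding document for the
+	 * authoritative binding):
+	 *   clocks = <&refclk>;
+	 *   clock-names = "input-clk0";
+	 */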
+
+ if (err) {
+		dev_err(&client->dev, "Unable to get input clock (%i).", err);
+ chip->input_clk = NULL;
+ return err;
+ }
+
+ chip->input_clk_freq = clk_get_rate(chip->input_clk);
+ dev_dbg(&client->dev, "Got input-freq from input-clk in device tree: %uHz",
+ chip->input_clk_freq);
+
+ chip->input_clk_nb.notifier_call = idt24x_clk_notifier_cb;
+ if (clk_notifier_register(chip->input_clk, &chip->input_clk_nb))
+ dev_warn(&client->dev,
+ "Unable to register clock notifier for input_clk.");
+
+ dev_dbg(&client->dev, "%s: about to read settings: %zu",
+ __func__, ARRAY_SIZE(chip->settings));
+
+ err = of_property_read_u8_array(
+ client->dev.of_node, "settings", chip->settings,
+ ARRAY_SIZE(chip->settings));
+ if (!err) {
+ dev_dbg(&client->dev, "settings property specified in DT");
+ chip->has_settings = true;
+ } else {
+ if (err == -EOVERFLOW) {
+ dev_alert(&client->dev,
+ "EOVERFLOW error trying to read the settings. ARRAY_SIZE: %zu",
+ ARRAY_SIZE(chip->settings));
+ return err;
+ }
+ dev_dbg(&client->dev,
+ "settings property not specified in DT (or there was an error that can be ignored: %i). The settings property is optional.",
+ err);
+ }
+
+ /*
+ * Requested output frequencies cannot be specified in the DT.
+ * Either a consumer needs to use the clock API to request the rate,
+ * or use debugfs to set the rate from user space. Use clock-names in
+ * DT to specify the output clock.
+ */
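+	/*
+	 * Illustrative consumer usage via the of_clk provider registered
+	 * below (Q0 is cell index 0):
+	 *   struct clk *q0 = of_clk_get(consumer_np, 0);
+	 *   clk_set_rate(q0, 148500000);
+	 */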
+
+ chip->regmap = devm_regmap_init_i2c(client, &idt24x_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(chip->regmap);
+ }
+
+ dev_dbg(&client->dev, "%s: call i2c_set_clientdata", __func__);
+ i2c_set_clientdata(client, chip);
+
+ if (chip->has_settings) {
+ /*
+ * A raw settings array was specified in the DT. Write the
+ * settings to the device immediately.
+ */
+ err = i2cwritebulk(
+ chip->i2c_client, chip->regmap, 0, chip->settings,
+ ARRAY_SIZE(chip->settings));
+ if (err) {
+ dev_err(&client->dev,
+ "error writing all settings to chip (%i)\n",
+ err);
+ return err;
+ }
+ dev_dbg(&client->dev, "successfully wrote full settings array");
+ }
+
+ /*
+ * Whether or not settings were written to the device, read all
+ * current values from the hw.
+ */
+ dev_dbg(&client->dev, "read from HW");
+ err = idt24x_read_from_hw(chip);
+ if (err) {
+ dev_err(&client->dev,
+ "failed calling idt24x_read_from_hw (%i)\n", err);
+ return err;
+ }
+
+ /* Create all 4 clocks */
+ for (x = 0; x < NUM_OUTPUTS; x++) {
+ init.name = kasprintf(
+ GFP_KERNEL, "%s.Q%i", client->dev.of_node->name, x);
+ chip->clk[x].chip = chip;
+ chip->clk[x].hw.init = &init;
+ chip->clk[x].index = x;
+ err = devm_clk_hw_register(&client->dev, &chip->clk[x].hw);
+ kfree(init.name); /* clock framework made a copy of the name */
+ if (err) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return err;
+ }
+ dev_dbg(&client->dev, "successfully registered Q%i", x);
+ }
+
+ if (err) {
+ dev_err(&client->dev, "clock registration failed\n");
+ return err;
+ }
+
+ err = of_clk_add_hw_provider(
+ client->dev.of_node, of_clk_idt24x_get, chip);
+ if (err) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return err;
+ }
+
+ err = idt24x_expose_via_debugfs(client, chip);
+ if (err) {
+ dev_err(&client->dev,
+ "error calling idt24x_expose_via_debugfs: %i\n", err);
+ return err;
+ }
+
+ if (chip->input_clk_num == NUM_INPUTS)
+ sprintf(buf, "XTAL");
+ else
+ sprintf(buf, "CLK%i", chip->input_clk_num);
+ dev_info(&client->dev, "probe success. input freq: %uHz (%s), settings string? %s\n",
+ chip->input_clk_freq, buf,
+ chip->has_settings ? "true" : "false");
+ return 0;
+}
+
+static int idt24x_remove(struct i2c_client *client)
+{
+	struct clk_idt24x_chip *chip = i2c_get_clientdata(client);
+
+ dev_info(&client->dev, "%s", __func__);
+ of_clk_del_provider(client->dev.of_node);
+ idt24x_cleanup_debugfs(chip);
+
+	if (chip->input_clk)
+ clk_notifier_unregister(
+ chip->input_clk, &chip->input_clk_nb);
+ return 0;
+}
+
+static const struct i2c_device_id idt24x_id[] = {
+ { "idt8t49n24x", idt24x },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, idt24x_id);
+
+static const struct of_device_id idt24x_of_match[] = {
+ { .compatible = "idt,idt8t49n241" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, idt24x_of_match);
+
+static struct i2c_driver idt24x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = idt24x_of_match,
+ },
+ .probe = idt24x_probe,
+ .remove = idt24x_remove,
+ .id_table = idt24x_id,
+};
+
+module_i2c_driver(idt24x_driver);
+
+MODULE_DESCRIPTION("8T49N24x ccf driver");
+MODULE_AUTHOR("David Cater <david.cater@idt.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/si5324.h b/drivers/clk/si5324.h
new file mode 100644
index 000000000000..b3826e7b2f84
--- /dev/null
+++ b/drivers/clk/si5324.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock generator platform data
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_SI5324_H__
+#define __LINUX_PLATFORM_DATA_SI5324_H__
+
+/**
+ * enum si5324_pll_src - Si5324 pll clock source
+ *
+ * @SI5324_PLL_SRC_XTAL: PLL source clock is the XTAL input (default)
+ * @SI5324_PLL_SRC_CLKIN1: PLL source clock is the CLKIN1 input
+ * @SI5324_PLL_SRC_CLKIN2: PLL source clock is the CLKIN2 input
+ *
+ * Defines enums for clock sources.
+ */
+enum si5324_pll_src {
+ SI5324_PLL_SRC_XTAL = 0,
+ SI5324_PLL_SRC_CLKIN1 = 1,
+ SI5324_PLL_SRC_CLKIN2 = 2,
+};
+
+/**
+ * enum si5324_drive_strength - Si5324 clock output drive strength
+ *
+ * @SI5324_DRIVE_DEFAULT: Default, do not change eeprom config
+ * @SI5324_DRIVE_2MA: 2mA clock output drive strength
+ * @SI5324_DRIVE_4MA: 4mA clock output drive strength
+ * @SI5324_DRIVE_6MA: 6mA clock output drive strength
+ * @SI5324_DRIVE_8MA: 8mA clock output drive strength
+ *
+ * Defines enums for drive strength
+ */
+enum si5324_drive_strength {
+ SI5324_DRIVE_DEFAULT = 0,
+ SI5324_DRIVE_2MA = 2,
+ SI5324_DRIVE_4MA = 4,
+ SI5324_DRIVE_6MA = 6,
+ SI5324_DRIVE_8MA = 8,
+};
+
+/**
+ * struct si5324_clkout_config - Si5324 clock output configuration
+ *
+ * @drive: output drive strength
+ * @rate: clkout rate
+ */
+struct si5324_clkout_config {
+ enum si5324_drive_strength drive;
+ unsigned long rate;
+};
+
+/**
+ * struct si5324_platform_data - Platform data for the Si5324 clock driver
+ *
+ * @pll_src: Pll source clock setting
+ * @clkout: Array of clkout configuration
+ */
+struct si5324_platform_data {
+ enum si5324_pll_src pll_src;
+ struct si5324_clkout_config clkout[2];
+};
+
+#endif
diff --git a/drivers/clk/si5324drv.c b/drivers/clk/si5324drv.c
new file mode 100644
index 000000000000..5c064a329e73
--- /dev/null
+++ b/drivers/clk/si5324drv.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ * Leon Woestenberg <leon@sidebranch.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include "si5324drv.h"
+
+/**
+ * si5324_rate_approx - Find closest rational approximation N2_LS/N3 fraction.
+ *
+ * @f: Holds the N2_LS/N3 fraction in 36.28 fixed point notation.
+ * @md: Holds the maximum denominator (N3) value allowed.
+ * @num: Store the numerator (N2_LS) found.
+ * @denom: Store the denominator (N3) found.
+ *
+ * This function finds the closest rational approximation of the
+ * fixed-point fraction @f by continued-fraction expansion, allowing
+ * only denominators up to @md and checking the denominator limit at
+ * each step.
+ */
+void si5324_rate_approx(u64 f, u64 md, u32 *num, u32 *denom)
+{
+ u64 a, h[3] = { 0, 1, 0 }, k[3] = { 1, 0, 0 };
+ u64 x, d, m, n = 1;
+ int i = 0;
+
+ if (md <= 1) {
+ *denom = 1;
+ *num = (u32)(f >> 28);
+ return;
+ }
+
+ n <<= 28;
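+ /*
+ * f is the fraction in 36.28 fixed point, so the implicit
+ * denominator is n = 2^28; strip common factors of two so the
+ * expansion below starts from the reduced fraction.
+ */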
+ for (i = 0; i < 28; i++) {
+ if ((f & 0x1) == 0) {
+ n >>= 1;
+ f >>= 1;
+ } else {
+ break;
+ }
+ }
+ d = f;
+
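+ /*
+ * Continued-fraction expansion of d/n: h[] and k[] hold the
+ * numerator and denominator of the current and previous
+ * convergents. If the next denominator would exceed md, clamp
+ * the final term and stop.
+ */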
+ for (i = 0; i < 64; i++) {
+ a = n ? (div64_u64(d, n)) : 0;
+ if (i && !a)
+ break;
+ x = d;
+ d = n;
+ div64_u64_rem(x, n, &m);
+ n = m;
+ x = a;
+ if (k[1] * a + k[0] >= md) {
+ x = div64_u64((md - k[0]), k[1]);
+ if (x * 2 >= a || k[1] >= md)
+ i = 65;
+ else
+ break;
+ }
+ h[2] = x * h[1] + h[0];
+ h[0] = h[1];
+ h[1] = h[2];
+ k[2] = x * k[1] + k[0];
+ k[0] = k[1];
+ k[1] = k[2];
+ }
+
+ *denom = (u32)k[1];
+ *num = (u32)h[1];
+}
+
+/**
+ * si5324_find_n2ls - Search through the possible settings for the N2_LS.
+ *
+ * @settings: Holds the settings up till now.
+ *
+ * This function finds the best setting for N2_LS and N3n with the values
+ * for N1_HS, NCn_LS, and N2_HS.
+ *
+ * Return: 1 when the best possible result has been found, 0 on failure.
+ */
+static int si5324_find_n2ls(struct si5324_settingst *settings)
+{
+ u32 result = 0;
+ u64 f3_actual;
+ u64 fosc_actual;
+ u64 fout_actual;
+ s64 delta_fout;
+ u64 n2_ls_div_n3, mult_res;
+ u32 mult;
+
+ n2_ls_div_n3 = div64_u64(div64_u64(div64_u64(settings->fosc,
+ (settings->fin >> SI5324_FIN_FOUT_SHIFT)),
+ (u64)settings->n2_hs), (u64)2);
+
+ si5324_rate_approx(n2_ls_div_n3, settings->n31_max, &settings->n2_ls,
+ &settings->n31);
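+ /* The fraction above solved for N2_LS / 2; N2_LS must be even. */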
+ settings->n2_ls *= 2;
+
+ if (settings->n2_ls < settings->n2_ls_min) {
+ mult = div64_u64(settings->n2_ls_min, settings->n2_ls);
+ div64_u64_rem(settings->n2_ls_min, settings->n2_ls, &mult_res);
+ mult = mult_res ? mult + 1 : mult;
+ settings->n2_ls *= mult;
+ settings->n31 *= mult;
+ }
+
+ if (settings->n31 < settings->n31_min) {
+ mult = div64_u64(settings->n31_min, settings->n31);
+ div64_u64_rem(settings->n31_min, settings->n31, &mult_res);
+ mult = mult_res ? mult + 1 : mult;
+ settings->n2_ls *= mult;
+ settings->n31 *= mult;
+ }
+ pr_debug("Trying N2_LS = %d N3 = %d.\n", settings->n2_ls,
+ settings->n31);
+
+ if (settings->n2_ls < settings->n2_ls_min ||
+ settings->n2_ls > settings->n2_ls_max) {
+ pr_info("N2_LS out of range.\n");
+ } else if ((settings->n31 < settings->n31_min) ||
+ (settings->n31 > settings->n31_max)) {
+ pr_info("N3 out of range.\n");
+ } else {
+ f3_actual = div64_u64(settings->fin, settings->n31);
+ fosc_actual = f3_actual * settings->n2_hs * settings->n2_ls;
+ fout_actual = div64_u64(fosc_actual,
+ (settings->n1_hs * settings->nc1_ls));
+ delta_fout = fout_actual - settings->fout;
+
+ if ((f3_actual < ((u64)SI5324_F3_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (f3_actual > ((u64)SI5324_F3_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("F3 frequency out of range.\n");
+ } else if ((fosc_actual < ((u64)SI5324_FOSC_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (fosc_actual > ((u64)SI5324_FOSC_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("Fosc frequency out of range.\n");
+ } else if ((fout_actual < ((u64)SI5324_FOUT_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (fout_actual > ((u64)SI5324_FOUT_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("Fout frequency out of range.\n");
+ } else {
+ pr_debug("Found solution: fout = %dHz delta = %dHz.\n",
+ (u32)(fout_actual >> SI5324_FIN_FOUT_SHIFT),
+ (u32)(delta_fout >> SI5324_FIN_FOUT_SHIFT));
+ pr_debug("fosc = %dkHz f3 = %dHz.\n",
+ (u32)((fosc_actual >> SI5324_FIN_FOUT_SHIFT) /
+ 1000),
+ (u32)(f3_actual >> SI5324_FIN_FOUT_SHIFT));
+
+ if (((u64)abs(delta_fout)) <
+ settings->best_delta_fout) {
+ settings->best_n1_hs = settings->n1_hs;
+ settings->best_nc1_ls = settings->nc1_ls;
+ settings->best_n2_hs = settings->n2_hs;
+ settings->best_n2_ls = settings->n2_ls;
+ settings->best_n3 = settings->n31;
+ settings->best_fout = fout_actual;
+ settings->best_delta_fout = abs(delta_fout);
+ if (delta_fout == 0)
+ result = 1;
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * si5324_find_n2 - Find a valid setting for N2_HS and N2_LS.
+ *
+ * @settings: Holds the settings up till now.
+ *
+ * This function finds valid settings for N2_HS and N2_LS. It iterates
+ * over all possibilities of N2_HS and then searches for usable N2_LS
+ * and N3 values.
+ *
+ * Return: 1 when the best possible result has been found.
+ */
+static int si5324_find_n2(struct si5324_settingst *settings)
+{
+ u32 result = 0;
+
+ for (settings->n2_hs = SI5324_N2_HS_MAX; settings->n2_hs >=
+ SI5324_N2_HS_MIN; settings->n2_hs--) {
+ pr_debug("Trying N2_HS = %d.\n", settings->n2_hs);
+ settings->n2_ls_min = (u32)(div64_u64(settings->fosc,
+ ((u64)(SI5324_F3_MAX * settings->n2_hs)
+ << SI5324_FIN_FOUT_SHIFT)));
+
+ if (settings->n2_ls_min < SI5324_N2_LS_MIN)
+ settings->n2_ls_min = SI5324_N2_LS_MIN;
+
+ settings->n2_ls_max = (u32)(div64_u64(settings->fosc,
+ ((u64)(SI5324_F3_MIN *
+ settings->n2_hs) <<
+ SI5324_FIN_FOUT_SHIFT)));
+ if (settings->n2_ls_max > SI5324_N2_LS_MAX)
+ settings->n2_ls_max = SI5324_N2_LS_MAX;
+
+ result = si5324_find_n2ls(settings);
+ if (result)
+ break;
+ }
+ return result;
+}
+
+/**
+ * si5324_calc_ncls_limits - Calculates the valid range for NCn_LS.
+ *
+ * @settings: Holds the input and output frequencies and the setting
+ * for N1_HS.
+ *
+ * This function calculates the valid range for NCn_LS with the value
+ * for the output frequency and N1_HS already set in settings.
+ *
+ * Return: -1 when there are no valid settings, 0 otherwise.
+ */
+int si5324_calc_ncls_limits(struct si5324_settingst *settings)
+{
+ settings->nc1_ls_min = div64_u64(settings->n1_hs_min,
+ settings->n1_hs);
+
+ if (settings->nc1_ls_min < SI5324_NC_LS_MIN)
+ settings->nc1_ls_min = SI5324_NC_LS_MIN;
+ if (settings->nc1_ls_min > 1 && (settings->nc1_ls_min & 0x1) == 1)
+ settings->nc1_ls_min++;
+ settings->nc1_ls_max = div64_u64(settings->n1_hs_max, settings->n1_hs);
+
+ if (settings->nc1_ls_max > SI5324_NC_LS_MAX)
+ settings->nc1_ls_max = SI5324_NC_LS_MAX;
+
+ if ((settings->nc1_ls_max & 0x1) == 1)
+ settings->nc1_ls_max--;
+ if ((settings->nc1_ls_max * settings->n1_hs < settings->n1_hs_min) ||
+ (settings->nc1_ls_min * settings->n1_hs > settings->n1_hs_max))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * si5324_find_ncls - Find a valid setting for NCn_LS
+ *
+ * @settings: Holds the input and output frequencies, the setting for
+ * N1_HS, and the limits for NCn_LS.
+ *
+ * This function finds a valid setting for NCn_LS that can deliver the correct
+ * output frequency. Assumes that the valid range is relatively small
+ * so a full search can be done (should be true for video clock frequencies).
+ *
+ * Return: 1 when the best possible result has been found.
+ */
+static int si5324_find_ncls(struct si5324_settingst *settings)
+{
+ u64 fosc_1;
+ u32 result = 0;
+
+ fosc_1 = settings->fout * settings->n1_hs;
+ for (settings->nc1_ls = settings->nc1_ls_min;
+ settings->nc1_ls <= settings->nc1_ls_max;) {
+ settings->fosc = fosc_1 * settings->nc1_ls;
+ pr_debug("Trying NCn_LS = %d: fosc = %dkHz.\n",
+ settings->nc1_ls,
+ (u32)(div64_u64((settings->fosc >>
+ SI5324_FIN_FOUT_SHIFT), 1000)));
+
+ result = si5324_find_n2(settings);
+ if (result)
+ break;
+ if (settings->nc1_ls == 1)
+ settings->nc1_ls++;
+ else
+ settings->nc1_ls += 2;
+ }
+ return result;
+}
+
+/**
+ * si5324_calcfreqsettings - Calculate the frequency settings
+ *
+ * @clkinfreq: Frequency of the input clock.
+ * @clkoutfreq: Desired output clock frequency.
+ * @clkactual: Actual clock frequency.
+ * @n1_hs: Set to the value for the N1_HS register.
+ * @ncn_ls: Set to the value for the NCn_LS register.
+ * @n2_hs: Set to the value for the N2_HS register.
+ * @n2_ls: Set to the value for the N2_LS register.
+ * @n3n: Set to the value for the N3n register.
+ * @bwsel: Set to the value for the BW_SEL register.
+ *
+ * This function calculates the frequency settings for the desired output
+ * frequency.
+ *
+ * Return: SI5324_SUCCESS for success, SI5324_ERR_FREQ when the
+ * requested frequency cannot be generated.
+ */
+int si5324_calcfreqsettings(u32 clkinfreq, u32 clkoutfreq, u32 *clkactual,
+ u8 *n1_hs, u32 *ncn_ls, u8 *n2_hs, u32 *n2_ls,
+ u32 *n3n, u8 *bwsel)
+{
+ struct si5324_settingst settings;
+ int result;
+
+ settings.fin = (u64)clkinfreq << SI5324_FIN_FOUT_SHIFT;
+ settings.fout = (u64)clkoutfreq << SI5324_FIN_FOUT_SHIFT;
+ settings.best_delta_fout = settings.fout;
+
+ settings.n1_hs_min = (int)(div64_u64(SI5324_FOSC_MIN, clkoutfreq));
+ if (settings.n1_hs_min < SI5324_N1_HS_MIN * SI5324_NC_LS_MIN)
+ settings.n1_hs_min = SI5324_N1_HS_MIN * SI5324_NC_LS_MIN;
+
+ settings.n1_hs_max = (int)(div64_u64(SI5324_FOSC_MAX, clkoutfreq));
+ if (settings.n1_hs_max > SI5324_N1_HS_MAX * SI5324_NC_LS_MAX)
+ settings.n1_hs_max = SI5324_N1_HS_MAX * SI5324_NC_LS_MAX;
+
+ settings.n31_min = div64_u64(clkinfreq, SI5324_F3_MAX);
+ if (settings.n31_min < SI5324_N3_MIN)
+ settings.n31_min = SI5324_N3_MIN;
+
+ settings.n31_max = div64_u64(clkinfreq, SI5324_F3_MIN);
+ if (settings.n31_max > SI5324_N3_MAX)
+ settings.n31_max = SI5324_N3_MAX;
+
+ /* Find a valid oscillator frequency with the highest setting of N1_HS
+ * possible (reduces power)
+ */
+ for (settings.n1_hs = SI5324_N1_HS_MAX;
+ settings.n1_hs >= SI5324_N1_HS_MIN; settings.n1_hs--) {
+ pr_debug("Trying N1_HS = %d.\n", settings.n1_hs);
+
+ result = si5324_calc_ncls_limits(&settings);
+ if (result) {
+ pr_debug("No valid settings\n");
+ continue;
+ }
+ result = si5324_find_ncls(&settings);
+ if (result)
+ break;
+ }
+
+ pr_debug("Si5324: settings.best_delta_fout = %llu\n",
+ (unsigned long long)settings.best_delta_fout);
+ pr_debug("Si5324: settings.fout = %llu\n",
+ (unsigned long long)settings.fout);
+
+ if (settings.best_delta_fout == settings.fout) {
+ pr_debug("Si5324: No valid settings found.");
+ return SI5324_ERR_FREQ;
+ }
+ pr_debug("Si5324: Found solution: fout = %dHz.\n",
+ (u32)(settings.best_fout >> 28));
+
+ /* Post processing: convert temporary values to actual registers */
+ *n1_hs = (u8)settings.best_n1_hs - 4;
+ *ncn_ls = settings.best_nc1_ls - 1;
+ *n2_hs = (u8)settings.best_n2_hs - 4;
+ *n2_ls = settings.best_n2_ls - 1;
+ *n3n = settings.best_n3 - 1;
+ /*
+ * How should the bandwidth selection be determined?
+ * Not all settings will be valid.
+ * refclk 2, 0xA2, BWSEL_REG=1010 (?)
+ * free running 2, 0x42, BWSEL_REG=0100 (?)
+ */
+ *bwsel = 6;
+
+ if (clkactual)
+ *clkactual = (settings.best_fout >> SI5324_FIN_FOUT_SHIFT);
+
+ return SI5324_SUCCESS;
+}
diff --git a/drivers/clk/si5324drv.h b/drivers/clk/si5324drv.h
new file mode 100644
index 000000000000..28ea3050d5fb
--- /dev/null
+++ b/drivers/clk/si5324drv.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ */
+
+#ifndef SI5324DRV_H_
+#define SI5324DRV_H_
+
+#include <linux/types.h>
+
+/******************************************************************************
+ * User settable defines that depend on the specific board design.
+ * The defaults are for the Xilinx KC705 board.
+ *****************************************************************************/
+
+#define SI5324_XTAL_FREQ 114285000UL
+
+/******************************************************************************
+ * Defines independent of the specific board design. Should not be changed.
+ *****************************************************************************/
+
+#define SI5324_SUCCESS 0 /*< Operation was successful */
+#define SI5324_ERR_IIC -1 /*< IIC error occurred */
+#define SI5324_ERR_FREQ -2 /*< Could not calculate frequency setting */
+#define SI5324_ERR_PARM -3 /*< Invalid parameter */
+
+#define SI5324_CLKSRC_CLK1 1 /*< Use clock input 1 */
+#define SI5324_CLKSRC_CLK2 2 /*< Use clock input 2 */
+#define SI5324_CLKSRC_XTAL 3 /*< Use crystal (free running mode) */
+
+#define SI5324_FOSC_MIN 4850000000UL /*< Min oscillator frequency */
+#define SI5324_FOSC_MAX 5670000000UL /*< Max oscillator frequency */
+#define SI5324_F3_MIN 10000 /*< Min phase detector frequency */
+#define SI5324_F3_MAX 2000000 /*< Max phase detector frequency */
+#define SI5324_FIN_MIN 2000 /*< Min input frequency */
+#define SI5324_FIN_MAX 710000000UL /*< Max input frequency */
+#define SI5324_FOUT_MIN 2000 /*< Min output frequency */
+#define SI5324_FOUT_MAX 945000000UL /*< Max output frequency */
+
+#define SI5324_N1_HS_MIN 6
+#define SI5324_N1_HS_MAX 11
+#define SI5324_NC_LS_MIN 1
+#define SI5324_NC_LS_MAX 0x100000
+#define SI5324_N2_HS_MIN 4
+#define SI5324_N2_HS_MAX 11
+#define SI5324_N2_LS_MIN 2 /* even values only */
+#define SI5324_N2_LS_MAX 0x100000
+#define SI5324_N3_MIN 1
+#define SI5324_N3_MAX 0x080000
+#define SI5324_FIN_FOUT_SHIFT 28
+
+struct si5324_settingst {
+ /* high-speed output divider */
+ u32 n1_hs_min;
+ u32 n1_hs_max;
+ u32 n1_hs;
+
+ /* low-speed output divider for clkout1 */
+ u32 nc1_ls_min;
+ u32 nc1_ls_max;
+ u32 nc1_ls;
+
+ /* low-speed output divider for clkout2 */
+ u32 nc2_ls_min;
+ u32 nc2_ls_max;
+ u32 nc2_ls;
+
+ /* high-speed feedback divider (PLL multiplier) */
+ u32 n2_hs;
+ /* low-speed feedback divider (PLL multiplier) */
+ u32 n2_ls_min;
+ u32 n2_ls_max;
+ u32 n2_ls;
+
+ /* input divider for clk1 */
+ u32 n31_min;
+ u32 n31_max;
+ u32 n31;
+
+ u64 fin;
+ u64 fout;
+ u64 fosc;
+ u64 best_delta_fout;
+ u64 best_fout;
+ u32 best_n1_hs;
+ u32 best_nc1_ls;
+ u32 best_n2_hs;
+ u32 best_n2_ls;
+ u32 best_n3;
+};
+
+int si5324_calcfreqsettings(u32 clkinfreq, u32 clkoutfreq, u32 *clkactual,
+ u8 *n1_hs, u32 *ncn_ls, u8 *n2_hs,
+ u32 *n2_ls, u32 *n3n, u8 *bwsel);
+void si5324_rate_approx(u64 f, u64 md, u32 *num, u32 *denom);
+int si5324_calc_ncls_limits(struct si5324_settingst *settings);
+
+#endif /* SI5324DRV_H_ */
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index ffbb9008c1c9..f7cde869c7f7 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -37,6 +37,7 @@ static void __iomem *zynq_clkc_base;
#define SLCR_CAN_MIOCLK_CTRL (zynq_clkc_base + 0x60)
#define SLCR_DBG_CLK_CTRL (zynq_clkc_base + 0x64)
#define SLCR_PCAP_CLK_CTRL (zynq_clkc_base + 0x68)
+#define SLCR_TOPSW_CLK_CTRL (zynq_clkc_base + 0x6c)
#define SLCR_FPGA0_CLK_CTRL (zynq_clkc_base + 0x70)
#define SLCR_621_TRUE (zynq_clkc_base + 0xc4)
#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
@@ -99,6 +100,48 @@ static const char *const gem1_emio_input_names[] __initconst = {
static const char *const swdt_ext_clk_input_names[] __initconst = {
"swdt_ext_clk"};
+#ifdef CONFIG_SUSPEND
+static struct clk *iopll_save_parent;
+
+#define TOPSW_CLK_CTRL_DIS_MASK BIT(0)
+
+int zynq_clk_suspend_early(void)
+{
+ int ret;
+
+ iopll_save_parent = clk_get_parent(clks[iopll]);
+
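+ /*
+ * Reparent the IOPLL to the slow ps_clk across suspend; the
+ * original parent is restored in zynq_clk_resume_late().
+ */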
+ ret = clk_set_parent(clks[iopll], ps_clk);
+ if (ret)
+ pr_info("%s: reparent iopll failed %d\n", __func__, ret);
+
+ return 0;
+}
+
+void zynq_clk_resume_late(void)
+{
+ clk_set_parent(clks[iopll], iopll_save_parent);
+}
+
+void zynq_clk_topswitch_enable(void)
+{
+ u32 reg;
+
+ reg = readl(SLCR_TOPSW_CLK_CTRL);
+ reg &= ~TOPSW_CLK_CTRL_DIS_MASK;
+ writel(reg, SLCR_TOPSW_CLK_CTRL);
+}
+
+void zynq_clk_topswitch_disable(void)
+{
+ u32 reg;
+
+ reg = readl(SLCR_TOPSW_CLK_CTRL);
+ reg |= TOPSW_CLK_CTRL_DIS_MASK;
+ writel(reg, SLCR_TOPSW_CLK_CTRL);
+}
+#endif
+
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
const char **parents, int enable)
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 6f057ab9df03..127760c213fd 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -34,6 +34,8 @@
#define END_OF_PARENTS 1
#define RESERVED_CLK_NAME ""
+#define CLK_TYPE_FLAG2_FIELD_MASK GENMASK(7, 4)
+#define CLK_TYPE_FLAG_BITS 8
#define CLK_GET_NAME_RESP_LEN 16
#define CLK_GET_TOPOLOGY_RESP_WORDS 3
#define CLK_GET_PARENTS_RESP_WORDS 3
@@ -396,6 +398,9 @@ static int __zynqmp_clock_get_topology(struct clock_topology *topology,
topology[*nnodes].type_flag =
FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
response->topology[i]);
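+ /*
+ * Bits 7:4 of the topology word carry a second set of
+ * type flags; fold them in above the first
+ * CLK_TYPE_FLAG_BITS flags.
+ */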
+ topology[*nnodes].type_flag |=
+ FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, response->topology[i]) <<
+ CLK_TYPE_FLAG_BITS;
(*nnodes)++;
}
@@ -752,6 +757,7 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
static const struct of_device_id zynqmp_clock_of_match[] = {
{.compatible = "xlnx,zynqmp-clk"},
+ {.compatible = "xlnx,versal-clk"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_clock_of_match);
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index d8f5b70d2709..46b88f3c6ac8 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -25,7 +25,7 @@
#define to_zynqmp_clk_divider(_hw) \
container_of(_hw, struct zynqmp_clk_divider, hw)
-#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CLK_FRAC BIT(8) /* has a fractional parent */
/**
* struct zynqmp_clk_divider - adjustable divider clock
@@ -34,13 +34,15 @@
* @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
+ * @max_div: Maximum supported divisor
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
- u8 flags;
+ u16 flags;
bool is_frac;
u32 clk_id;
u32 div_type;
+ u32 max_div;
};
static inline int zynqmp_divider_get_val(unsigned long parent_rate,
@@ -88,6 +90,34 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL(parent_rate, value);
}
+static void zynqmp_compute_divider(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u32 max_div,
+ int *bestdiv)
+{
+ int div1;
+ int div2;
+ long error = LONG_MAX;
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+
+ if (!pdivider)
+ return;
+
+ *bestdiv = 1;
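+ /*
+ * Exhaustive search over both divider stages: div1 ranges over
+ * the parent divider, div2 over this one. Remember the div2
+ * that minimizes the error against the requested rate.
+ */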
+ for (div1 = 1; div1 <= pdivider->max_div; div1++) {
+ for (div2 = 1; div2 <= max_div; div2++) {
+ long new_error = ((parent_rate / div1) / div2) - rate;
+
+ if (abs(new_error) < abs(error)) {
+ *bestdiv = div2;
+ error = new_error;
+ }
+ }
+ }
+}
+
/**
* zynqmp_clk_divider_round_rate() - Round rate of divider clock
* @hw: handle between common and hardware-specific interfaces
@@ -125,6 +155,17 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = zynqmp_divider_get_val(*prate, rate);
+ /*
+ * In case of two divisors, compute best divider values and return
+ * divider2 value based on compute value. div1 will be automatically
+ * set to optimum based on required total divider value.
+ */
+ if (bestdiv > divider->max_div && div_type == TYPE_DIV2 &&
+ (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ zynqmp_compute_divider(hw, rate, *prate,
+ divider->max_div, &bestdiv);
+ }
+
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
*prate = rate * bestdiv;
@@ -195,6 +236,9 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
struct clk_hw *hw;
struct clk_init_data init;
int ret;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
/* allocate the divider */
div = kzalloc(sizeof(*div), GFP_KERNEL);
@@ -215,6 +259,21 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
div->clk_id = clk_id;
div->div_type = nodes->type;
+ /*
+ * To achieve the best possible rate, the computation needs the
+ * divider's maximum limit. Get the maximum supported divisor from
+ * firmware; to maintain backward compatibility, assign the maximum
+ * possible value (0xFFFF) if the query is not successful.
+ */
+ qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
+ qdata.arg1 = clk_id;
+ qdata.arg2 = nodes->type;
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ div->max_div = 0xFFFF;
+ else
+ div->max_div = ret_payload[1];
+
hw = &div->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index a541397a172c..2f4ccaa6ae41 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -188,9 +188,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
frac = (parent_rate * f) / FRAC_DIV;
ret = eemi_ops->clock_setdivider(clk_id, m);
- if (ret)
- pr_warn_once("%s() set divider failed for %s, ret = %d\n",
- __func__, clk_name, ret);
+ if (ret) {
+ if (ret == -EUSERS)
+ WARN(1, "More than allowed devices are using the %s, which is forbidden\n", clk_name);
+ pr_err("%s() set divider failed for %s, ret = %d\n",
+ __func__, clk_name, ret);
+ }
eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4bdd761a8ca9..0e24f1b9bb33 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -659,6 +659,36 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+config CRYPTO_DEV_ZYNQMP_SHA3
+ tristate "Support for Xilinx ZynqMP SHA3 hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_HASH
+ help
+ ZynqMP processors have a Keccak-SHA384 hw accelerator.
+
+ Select this if you want to use the ZynqMP module for
+ Keccak-SHA384 algorithms.
+
+config CRYPTO_DEV_XILINX_RSA
+ tristate "Support for Xilinx ZynqMP RSA hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ help
+ Xilinx ZynqMP processors have an RSA hw accelerator. Select this
+ if you want to use the ZynqMP module for RSA algorithms.
+
+config CRYPTO_DEV_ZYNQMP_AES
+ tristate "Support for Xilinx ZynqMP AES hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ help
+ Xilinx processors have an AES-GCM engine used for symmetric key
+ encryption and decryption. This driver interfaces with AES hw
+ accelerator. Select this if you want to use the ZynqMP module
+ for AES algorithms.
+
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index a23a7197fcd7..c25b9236e533 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -45,4 +45,7 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
+obj-$(CONFIG_CRYPTO_DEV_XILINX_RSA) += zynqmp-rsa.o
obj-y += hisilicon/
diff --git a/drivers/crypto/zynqmp-aes.c b/drivers/crypto/zynqmp-aes.c
new file mode 100644
index 000000000000..595447b31185
--- /dev/null
+++ b/drivers/crypto/zynqmp-aes.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP AES Driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_AES_QUEUE_LENGTH 1
+#define ZYNQMP_AES_IV_SIZE 12
+#define ZYNQMP_AES_GCM_SIZE 16
+#define ZYNQMP_AES_KEY_SIZE 32
+
+#define ZYNQMP_AES_DECRYPT 0
+#define ZYNQMP_AES_ENCRYPT 1
+
+#define ZYNQMP_AES_KUP_KEY 0
+
+#define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR 0x01
+#define ZYNQMP_AES_SIZE_ERR 0x06
+#define ZYNQMP_AES_WRONG_KEY_SRC_ERR 0x13
+#define ZYNQMP_AES_PUF_NOT_PROGRAMMED 0xE300
+
+#define ZYNQMP_AES_BLOCKSIZE 0x04
+
+struct zynqmp_aes_dev {
+ struct list_head list;
+ struct device *dev;
+ /* the lock protects queue and dev list */
+ spinlock_t lock;
+ struct crypto_queue queue;
+};
+
+struct zynqmp_aes_op {
+ struct zynqmp_aes_dev *dd;
+ void *src;
+ void *dst;
+ int len;
+ u8 key[ZYNQMP_AES_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+ u32 keytype;
+};
+
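+/*
+ * Operation descriptor handed to the firmware by physical address:
+ * source/destination/IV/key addresses, payload size, operation type
+ * (encrypt/decrypt) and key source.
+ */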
+struct zynqmp_aes_data {
+ u64 src;
+ u64 iv;
+ u64 key;
+ u64 dst;
+ u64 size;
+ u64 optype;
+ u64 keysrc;
+};
+
+struct zynqmp_aes_drv {
+ struct list_head dev_list;
+ /* the lock protects dev list */
+ spinlock_t lock;
+};
+
+static struct zynqmp_aes_drv zynqmp_aes = {
+ .dev_list = LIST_HEAD_INIT(zynqmp_aes.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(zynqmp_aes.lock),
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+static struct zynqmp_aes_dev *zynqmp_aes_find_dev(struct zynqmp_aes_op *ctx)
+{
+ struct zynqmp_aes_dev *aes_dd = NULL;
+ struct zynqmp_aes_dev *tmp;
+
+ spin_lock_bh(&zynqmp_aes.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_aes.dev_list, list) {
+ aes_dd = tmp;
+ break;
+ }
+ ctx->dd = aes_dd;
+ } else {
+ aes_dd = ctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_aes.lock);
+
+ return aes_dd;
+}
+
+static int zynqmp_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct zynqmp_aes_op *op = crypto_tfm_ctx(tfm);
+
+ op->keylen = len;
+ memcpy(op->key, key, len);
+
+ return 0;
+}
+
+static int zynqmp_setkeytype(struct crypto_tfm *tfm, const u8 *keytype,
+ unsigned int len)
+{
+ struct zynqmp_aes_op *op = crypto_tfm_ctx(tfm);
+
+ op->keytype = (u32)(*keytype);
+
+ return 0;
+}
+
+static int zynqmp_aes_xcrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ unsigned int flags)
+{
+ struct zynqmp_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct zynqmp_aes_dev *dd = zynqmp_aes_find_dev(op);
+ int err, ret, copy_bytes, src_data = 0, dst_data = 0;
+ dma_addr_t dma_addr, dma_addr_buf;
+ struct zynqmp_aes_data *abuf;
+ struct blkcipher_walk walk;
+ unsigned int data_size;
+ size_t dma_size;
+ char *kbuf;
+
+ if (!eemi_ops->aes)
+ return -ENOTSUPP;
+
+ if (op->keytype == ZYNQMP_AES_KUP_KEY)
+ dma_size = nbytes + ZYNQMP_AES_KEY_SIZE
+ + ZYNQMP_AES_IV_SIZE;
+ else
+ dma_size = nbytes + ZYNQMP_AES_IV_SIZE;
+
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ abuf = dma_alloc_coherent(dd->dev, sizeof(struct zynqmp_aes_data),
+ &dma_addr_buf, GFP_KERNEL);
+ if (!abuf) {
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+ return -ENOMEM;
+ }
+
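+ /*
+ * kbuf layout: payload (nbytes), then the 12-byte IV and, for
+ * KUP keys, the 32-byte key itself. abuf carries the physical
+ * address of each region for the firmware call.
+ */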
+ data_size = nbytes;
+ blkcipher_walk_init(&walk, dst, src, data_size);
+ err = blkcipher_walk_virt(desc, &walk);
+ op->iv = walk.iv;
+
+ while ((nbytes = walk.nbytes)) {
+ op->src = walk.src.virt.addr;
+ memcpy(kbuf + src_data, op->src, nbytes);
+ src_data = src_data + nbytes;
+ nbytes &= (ZYNQMP_AES_BLOCKSIZE - 1);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ memcpy(kbuf + data_size, op->iv, ZYNQMP_AES_IV_SIZE);
+ abuf->src = dma_addr;
+ abuf->dst = dma_addr;
+ abuf->iv = abuf->src + data_size;
+ abuf->size = data_size - ZYNQMP_AES_GCM_SIZE;
+ abuf->optype = flags;
+ abuf->keysrc = op->keytype;
+
+ if (op->keytype == ZYNQMP_AES_KUP_KEY) {
+ memcpy(kbuf + data_size + ZYNQMP_AES_IV_SIZE,
+ op->key, ZYNQMP_AES_KEY_SIZE);
+
+ abuf->key = abuf->src + data_size + ZYNQMP_AES_IV_SIZE;
+ } else {
+ abuf->key = 0;
+ }
+ eemi_ops->aes(dma_addr_buf, &ret);
+
+ if (ret != 0) {
+ switch (ret) {
+ case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
+ dev_err(dd->dev, "ERROR: Gcm Tag mismatch\n\r");
+ break;
+ case ZYNQMP_AES_SIZE_ERR:
+ dev_err(dd->dev, "ERROR : Non word aligned data\n\r");
+ break;
+ case ZYNQMP_AES_WRONG_KEY_SRC_ERR:
+ dev_err(dd->dev, "ERROR: Wrong KeySrc, enable secure mode\n\r");
+ break;
+ case ZYNQMP_AES_PUF_NOT_PROGRAMMED:
+ dev_err(dd->dev, "ERROR: PUF is not registered\r\n");
+ break;
+ default:
+ dev_err(dd->dev, "ERROR: Invalid");
+ break;
+ }
+ goto out;
+ }
+ if (flags)
+ copy_bytes = data_size;
+ else
+ copy_bytes = data_size - ZYNQMP_AES_GCM_SIZE;
+
+ blkcipher_walk_init(&walk, dst, src, copy_bytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ memcpy(walk.dst.virt.addr, kbuf + dst_data, nbytes);
+ dst_data = dst_data + nbytes;
+ nbytes &= (ZYNQMP_AES_BLOCKSIZE - 1);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+out:
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+ dma_free_coherent(dd->dev, sizeof(struct zynqmp_aes_data),
+ abuf, dma_addr_buf);
+ return err;
+}
+
+static int zynqmp_aes_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_aes_xcrypt(desc, dst, src, nbytes, ZYNQMP_AES_DECRYPT);
+}
+
+static int zynqmp_aes_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_aes_xcrypt(desc, dst, src, nbytes, ZYNQMP_AES_ENCRYPT);
+}
+
+static struct crypto_alg zynqmp_alg = {
+ .cra_name = "xilinx-zynqmp-aes",
+ .cra_driver_name = "zynqmp-aes",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = ZYNQMP_AES_BLOCKSIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_aes_op),
+ .cra_alignmask = 15,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 0,
+ .max_keysize = ZYNQMP_AES_KEY_SIZE,
+ .setkey = zynqmp_setkey_blk,
+ .setkeytype = zynqmp_setkeytype,
+ .encrypt = zynqmp_aes_encrypt,
+ .decrypt = zynqmp_aes_decrypt,
+ .ivsize = ZYNQMP_AES_IV_SIZE,
+ }
+ }
+};
+
+static const struct of_device_id zynqmp_aes_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-aes" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
+
+static int zynqmp_aes_probe(struct platform_device *pdev)
+{
+ struct zynqmp_aes_dev *aes_dd;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ aes_dd = devm_kzalloc(dev, sizeof(*aes_dd), GFP_KERNEL);
+ if (!aes_dd)
+ return -ENOMEM;
+
+ aes_dd->dev = dev;
+ platform_set_drvdata(pdev, aes_dd);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&aes_dd->list);
+ crypto_init_queue(&aes_dd->queue, ZYNQMP_AES_QUEUE_LENGTH);
+ list_add_tail(&aes_dd->list, &zynqmp_aes.dev_list);
+
+ ret = crypto_register_alg(&zynqmp_alg);
+ if (ret)
+ goto err_algs;
+
+ dev_info(dev, "AES Successfully Registered\n\r");
+ return 0;
+
+err_algs:
+ list_del(&aes_dd->list);
+ dev_err(dev, "initialization failed.\n");
+
+ return ret;
+}
+
+static int zynqmp_aes_remove(struct platform_device *pdev)
+{
+ struct zynqmp_aes_dev *aes_dd;
+
+ aes_dd = platform_get_drvdata(pdev);
+ if (!aes_dd)
+ return -ENODEV;
+ list_del(&aes_dd->list);
+ crypto_unregister_alg(&zynqmp_alg);
+ return 0;
+}
+
+static struct platform_driver xilinx_aes_driver = {
+ .probe = zynqmp_aes_probe,
+ .remove = zynqmp_aes_remove,
+ .driver = {
+ .name = "zynqmp_aes",
+ .of_match_table = of_match_ptr(zynqmp_aes_dt_ids),
+ },
+};
+
+module_platform_driver(xilinx_aes_driver);
+
+MODULE_DESCRIPTION("Xilinx ZynqMP AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_AUTHOR("Kalyani Akula <kalyani.akula@xilinx.com>");
diff --git a/drivers/crypto/zynqmp-rsa.c b/drivers/crypto/zynqmp-rsa.c
new file mode 100644
index 000000000000..410d816ae84b
--- /dev/null
+++ b/drivers/crypto/zynqmp-rsa.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_RSA_QUEUE_LENGTH 1
+#define ZYNQMP_RSA_MAX_KEY_SIZE 1024
+#define ZYNQMP_BLOCKSIZE 64
+
+struct zynqmp_rsa_dev;
+
+struct zynqmp_rsa_op {
+ struct zynqmp_rsa_dev *dd;
+ void *src;
+ void *dst;
+ int len;
+ u8 key[ZYNQMP_RSA_MAX_KEY_SIZE];
+ u8 *iv;
+ u32 keylen;
+};
+
+struct zynqmp_rsa_dev {
+ struct list_head list;
+ struct device *dev;
+ /* the lock protects queue and dev list*/
+ spinlock_t lock;
+ struct crypto_queue queue;
+};
+
+struct zynqmp_rsa_drv {
+ struct list_head dev_list;
+ /* the lock protects queue and dev list*/
+ spinlock_t lock;
+};
+
+static struct zynqmp_rsa_drv zynqmp_rsa = {
+ .dev_list = LIST_HEAD_INIT(zynqmp_rsa.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(zynqmp_rsa.lock),
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+static struct zynqmp_rsa_dev *zynqmp_rsa_find_dev(struct zynqmp_rsa_op *ctx)
+{
+ struct zynqmp_rsa_dev *rsa_dd = NULL;
+ struct zynqmp_rsa_dev *tmp;
+
+ spin_lock_bh(&zynqmp_rsa.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_rsa.dev_list, list) {
+ rsa_dd = tmp;
+ break;
+ }
+ ctx->dd = rsa_dd;
+ } else {
+ rsa_dd = ctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_rsa.lock);
+
+ return rsa_dd;
+}
+
+static int zynqmp_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct zynqmp_rsa_op *op = crypto_tfm_ctx(tfm);
+
+ op->keylen = len;
+ memcpy(op->key, key, len);
+ return 0;
+}
+
+static int zynqmp_rsa_xcrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, unsigned int flags)
+{
+ struct zynqmp_rsa_op *op = crypto_blkcipher_ctx(desc->tfm);
+ struct zynqmp_rsa_dev *dd = zynqmp_rsa_find_dev(op);
+ int err, datasize, src_data = 0, dst_data = 0;
+ struct blkcipher_walk walk;
+ char *kbuf;
+ size_t dma_size;
+ dma_addr_t dma_addr;
+
+ if (!eemi_ops->rsa)
+ return -ENOTSUPP;
+
+ dma_size = nbytes + op->keylen;
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
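+ /* kbuf layout: payload (nbytes) followed by the key blob. */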
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((datasize = walk.nbytes)) {
+ op->src = walk.src.virt.addr;
+ memcpy(kbuf + src_data, op->src, datasize);
+ src_data = src_data + datasize;
+ datasize &= (ZYNQMP_BLOCKSIZE - 1);
+ err = blkcipher_walk_done(desc, &walk, datasize);
+ }
+ memcpy(kbuf + nbytes, op->key, op->keylen);
+ eemi_ops->rsa(dma_addr, nbytes, flags);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((datasize = walk.nbytes)) {
+ memcpy(walk.dst.virt.addr, kbuf + dst_data, datasize);
+ dst_data = dst_data + datasize;
+ datasize &= (ZYNQMP_BLOCKSIZE - 1);
+ err = blkcipher_walk_done(desc, &walk, datasize);
+ }
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+ return err;
+}
+
+static int
+zynqmp_rsa_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_rsa_xcrypt(desc, dst, src, nbytes, 0);
+}
+
+static int
+zynqmp_rsa_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return zynqmp_rsa_xcrypt(desc, dst, src, nbytes, 1);
+}
+
+static struct crypto_alg zynqmp_alg = {
+ .cra_name = "xilinx-zynqmp-rsa",
+ .cra_driver_name = "zynqmp-rsa",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = ZYNQMP_BLOCKSIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_rsa_op),
+ .cra_alignmask = 15,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = 0,
+ .max_keysize = ZYNQMP_RSA_MAX_KEY_SIZE,
+ .setkey = zynqmp_setkey_blk,
+ .encrypt = zynqmp_rsa_encrypt,
+ .decrypt = zynqmp_rsa_decrypt,
+ .ivsize = 1,
+ }
+ }
+};
+
+static const struct of_device_id zynqmp_rsa_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-rsa" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_rsa_dt_ids);
+
+static int zynqmp_rsa_probe(struct platform_device *pdev)
+{
+ struct zynqmp_rsa_dev *rsa_dd;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ rsa_dd = devm_kzalloc(&pdev->dev, sizeof(*rsa_dd), GFP_KERNEL);
+ if (!rsa_dd)
+ return -ENOMEM;
+
+ rsa_dd->dev = dev;
+ platform_set_drvdata(pdev, rsa_dd);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration\n");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&rsa_dd->list);
+ spin_lock_init(&rsa_dd->lock);
+ crypto_init_queue(&rsa_dd->queue, ZYNQMP_RSA_QUEUE_LENGTH);
+ spin_lock(&zynqmp_rsa.lock);
+ list_add_tail(&rsa_dd->list, &zynqmp_rsa.dev_list);
+ spin_unlock(&zynqmp_rsa.lock);
+
+ ret = crypto_register_alg(&zynqmp_alg);
+ if (ret)
+ goto err_algs;
+
+ return 0;
+
+err_algs:
+ spin_lock(&zynqmp_rsa.lock);
+ list_del(&rsa_dd->list);
+ spin_unlock(&zynqmp_rsa.lock);
+ dev_err(dev, "initialization failed.\n");
+ return ret;
+}
+
+static int zynqmp_rsa_remove(struct platform_device *pdev)
+{
+ crypto_unregister_alg(&zynqmp_alg);
+ return 0;
+}
+
+static struct platform_driver xilinx_rsa_driver = {
+ .probe = zynqmp_rsa_probe,
+ .remove = zynqmp_rsa_remove,
+ .driver = {
+ .name = "zynqmp_rsa",
+ .of_match_table = of_match_ptr(zynqmp_rsa_dt_ids),
+ },
+};
+
+module_platform_driver(xilinx_rsa_driver);
+
+MODULE_DESCRIPTION("ZynqMP RSA hw acceleration support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
diff --git a/drivers/crypto/zynqmp-sha.c b/drivers/crypto/zynqmp-sha.c
new file mode 100644
index 000000000000..3a15fedbad98
--- /dev/null
+++ b/drivers/crypto/zynqmp-sha.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_SHA3_INIT 1
+#define ZYNQMP_SHA3_UPDATE 2
+#define ZYNQMP_SHA3_FINAL 4
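+
+/*
+ * The hash state lives in the firmware across calls: INIT starts a
+ * new Keccak-SHA384 computation, UPDATE feeds it data buffers and
+ * FINAL retrieves the digest.
+ */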
+
+#define ZYNQMP_SHA_QUEUE_LENGTH 1
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+struct zynqmp_sha_dev;
+
+/*
+ * .statesize = sizeof(struct zynqmp_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct zynqmp_sha_reqctx {
+ struct zynqmp_sha_dev *dd;
+ unsigned long flags;
+};
+
+struct zynqmp_sha_ctx {
+ struct zynqmp_sha_dev *dd;
+ unsigned long flags;
+};
+
+struct zynqmp_sha_dev {
+ struct list_head list;
+ struct device *dev;
+ /* the lock protects queue and dev list*/
+ spinlock_t lock;
+ int err;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct zynqmp_sha_drv {
+ struct list_head dev_list;
+ /* the lock protects queue and dev list*/
+ spinlock_t lock;
+};
+
+static struct zynqmp_sha_drv zynqmp_sha = {
+ .dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
+};
+
+static int zynqmp_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct zynqmp_sha_dev *dd = NULL;
+ struct zynqmp_sha_dev *tmp;
+ int ret;
+
+ if (!eemi_ops->sha_hash)
+ return -ENOTSUPP;
+
+ spin_lock_bh(&zynqmp_sha.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_sha.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_sha.lock);
+
+ ctx->dd = dd;
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ ret = eemi_ops->sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+
+ return ret;
+}
+
+static int zynqmp_sha_update(struct ahash_request *req)
+{
+ struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct zynqmp_sha_dev *dd = tctx->dd;
+ char *kbuf;
+ size_t dma_size = req->nbytes;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!req->nbytes)
+ return 0;
+
+ if (!eemi_ops->sha_hash)
+ return -ENOTSUPP;
+
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
+ __flush_cache_user_range((unsigned long)kbuf,
+ (unsigned long)kbuf + dma_size);
+ ret = eemi_ops->sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_sha_final(struct ahash_request *req)
+{
+ struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct zynqmp_sha_dev *dd = tctx->dd;
+ char *kbuf;
+ size_t dma_size = SHA384_DIGEST_SIZE;
+ dma_addr_t dma_addr;
+ int ret;
+
+ if (!eemi_ops->sha_hash)
+ return -ENOTSUPP;
+
+ kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = eemi_ops->sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
+ memcpy(req->result, kbuf, SHA384_DIGEST_SIZE);
+ dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_sha_finup(struct ahash_request *req)
+{
+ int ret;
+
+ ret = zynqmp_sha_update(req);
+ if (!ret)
+ ret = zynqmp_sha_final(req);
+
+ return ret;
+}
+
+static int zynqmp_sha_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = zynqmp_sha_init(req);
+ if (!ret)
+ ret = zynqmp_sha_update(req);
+ if (!ret)
+ ret = zynqmp_sha_final(req);
+
+ return ret;
+}
+
+static int zynqmp_sha_export(struct ahash_request *req, void *out)
+{
+ const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int zynqmp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
+static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
+{
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct zynqmp_sha_reqctx));
+
+ return 0;
+}
+
+static struct ahash_alg sha3_alg = {
+ .init = zynqmp_sha_init,
+ .update = zynqmp_sha_update,
+ .final = zynqmp_sha_final,
+ .finup = zynqmp_sha_finup,
+ .digest = zynqmp_sha_digest,
+ .export = zynqmp_sha_export,
+ .import = zynqmp_sha_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct zynqmp_sha_reqctx),
+ .base = {
+ .cra_name = "xilinx-keccak-384",
+ .cra_driver_name = "zynqmp-keccak-384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct zynqmp_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = zynqmp_sha_cra_init,
+ }
+ }
+};
+
+static const struct of_device_id zynqmp_sha_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-keccak-384" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+ struct zynqmp_sha_dev *sha_dd;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
+ if (!sha_dd)
+ return -ENOMEM;
+
+ sha_dd->dev = dev;
+ platform_set_drvdata(pdev, sha_dd);
+ INIT_LIST_HEAD(&sha_dd->list);
+ spin_lock_init(&sha_dd->lock);
+ crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
+ spin_lock(&zynqmp_sha.lock);
+ list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
+ spin_unlock(&zynqmp_sha.lock);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (err < 0) {
+ dev_err(dev, "no usable DMA configuration\n");
+ goto err_algs;
+ }
+
+ err = crypto_register_ahash(&sha3_alg);
+ if (err)
+ goto err_algs;
+
+ return 0;
+
+err_algs:
+ spin_lock(&zynqmp_sha.lock);
+ list_del(&sha_dd->list);
+ spin_unlock(&zynqmp_sha.lock);
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+ struct zynqmp_sha_dev *sha_dd;
+
+ sha_dd = platform_get_drvdata(pdev);
+
+ if (!sha_dd)
+ return -ENODEV;
+
+ spin_lock(&zynqmp_sha.lock);
+ list_del(&sha_dd->list);
+ spin_unlock(&zynqmp_sha.lock);
+
+ crypto_unregister_ahash(&sha3_alg);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+ .probe = zynqmp_sha_probe,
+ .remove = zynqmp_sha_remove,
+ .driver = {
+ .name = "zynqmp-keccak-384",
+ .of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
+ },
+};
+
+module_platform_driver(zynqmp_sha_driver);
+
+MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 703275cc29de..49726e8f646d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -622,6 +622,8 @@ config XGENE_DMA
help
Enable support for the APM X-Gene SoC DMA engine.
+source "drivers/dma/xilinx/Kconfig"
+
config XILINX_DMA
tristate "Xilinx AXI DMAS Engine"
depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
@@ -648,6 +650,18 @@ config XILINX_ZYNQMP_DMA
help
Enable support for Xilinx ZynqMP DMA controller.
+config XILINX_PS_PCIE_DMA
+ tristate "Xilinx PS PCIe DMA support"
+ depends on ((PCI && X86_64) || ARM64)
+ select DMA_ENGINE
+ help
+ Enable support for the Xilinx PS PCIe DMA engine present
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
config ZX_DMA
tristate "ZTE ZX DMA support"
depends on ARCH_ZX || COMPILE_TEST
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index dd22dbd0c4ea..69b72e825d85 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -964,6 +964,13 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (dma_has_cap(DMA_SG, device->cap_mask) && !device->device_prep_dma_sg) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_SG");
+ return -EIO;
+ }
+
if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index dd1c65e61406..cb5a55c9d195 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -44,10 +44,15 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
+static unsigned int sg_buffers = 1;
+module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sg_buffers,
+ "Number of scatter gather buffers (default: 1)");
+
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
- "dmatest 0-memcpy 1-memset (default: 0)");
+ "dmatest 0-memcpy 1-slave_sg 2-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
@@ -596,6 +601,9 @@ static int dmatest_func(void *data)
params->alignment;
src->cnt = dst->cnt = 1;
is_memset = true;
+ } else if (thread->type == DMA_SG) {
+ align = dev->copy_align;
+ src->cnt = dst->cnt = sg_buffers;
} else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -660,9 +668,13 @@ static int dmatest_func(void *data)
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
unsigned int len;
+ struct scatterlist tx_sg[src->cnt];
+ struct scatterlist rx_sg[src->cnt];
total_tests++;
+ align = 3;
+
if (params->transfer_size) {
if (params->transfer_size >= buf_size) {
pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
@@ -750,6 +762,15 @@ static int dmatest_func(void *data)
um->bidi_cnt++;
}
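+ /*
+ * Build the slave_sg lists from the buffers mapped above:
+ * rx_sg entries point at the source buffers, tx_sg at the
+ * destinations, one entry of len bytes each.
+ */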
+ sg_init_table(tx_sg, src->cnt);
+ sg_init_table(rx_sg, src->cnt);
+ for (i = 0; i < src->cnt; i++) {
+ sg_dma_address(&rx_sg[i]) = srcs[i];
+ sg_dma_address(&tx_sg[i]) = dsts[i] + dst->off;
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+ }
+
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst->off,
@@ -759,6 +780,9 @@ static int dmatest_func(void *data)
dsts[0] + dst->off,
*(src->aligned[0] + src->off),
len, flags);
+ else if (thread->type == DMA_SG)
+ tx = dev->device_prep_dma_sg(chan, tx_sg, src->cnt,
+ rx_sg, src->cnt, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst->off,
@@ -924,6 +948,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
op = "copy";
else if (type == DMA_MEMSET)
op = "set";
+ else if (type == DMA_SG)
+ op = "sg";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
@@ -987,12 +1013,19 @@ static int dmatest_add_channel(struct dmatest_info *info,
}
if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
- if (dmatest == 1) {
+ if (dmatest == 2) {
cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
thread_count += cnt > 0 ? cnt : 0;
}
}
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+ if (dmatest == 1) {
+ cnt = dmatest_add_threads(info, dtc, DMA_SG);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ }
+
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
@@ -1069,6 +1102,7 @@ static void add_threaded_test(struct dmatest_info *info)
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
request_channels(info, DMA_XOR);
+ request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
diff --git a/drivers/dma/xilinx/Kconfig b/drivers/dma/xilinx/Kconfig
new file mode 100644
index 000000000000..3bb99a88c8cc
--- /dev/null
+++ b/drivers/dma/xilinx/Kconfig
@@ -0,0 +1,67 @@
+#
+# XILINX DMA Engines configuration
+#
+
+menuconfig XILINX_DMA_ENGINES
+ bool "Xilinx DMA Engines"
+ help
+ Enable support for the Xilinx DMA controllers. It supports three DMA
+ engines: Axi Central DMA (memory to memory transfer), Axi DMA (memory and
+ device transfer), and Axi VDMA (memory and video device transfer).
+
+if XILINX_DMA_ENGINES
+
+config XILINX_DMATEST
+ tristate "DMA Test client for AXI DMA"
+ depends on XILINX_DMA
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
+config XILINX_VDMATEST
+ tristate "DMA Test client for VDMA"
+ depends on XILINX_DMA
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
+config XILINX_CDMATEST
+ tristate "DMA Test client for CDMA"
+ depends on XILINX_DMA
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
+config XILINX_DPDMA
+ tristate "Xilinx DPDMA Engine"
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx DisplayPort DMA.
+
+config XILINX_DPDMA_DEBUG_FS
+ bool "Xilinx DPDMA debugfs"
+ depends on DEBUG_FS && XILINX_DPDMA
+ help
+ Enable the debugfs code for DPDMA driver. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config XILINX_PS_PCIE_DMA_TEST
+ tristate "Xilinx PS PCIe DMA test client"
+ depends on XILINX_PS_PCIE_DMA
+ help
+ Enable support for the test client of Xilinx PS PCIe DMA engine
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+endif # XILINX_DMA_ENGINES
+
+config XILINX_FRMBUF
+ tristate "Xilinx Framebuffer"
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx Framebuffer DMA.
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index e921de575b55..b6cbae718a7c 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,3 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_XILINX_DMATEST) += axidmatest.o
+obj-$(CONFIG_XILINX_VDMATEST) += vdmatest.o
+obj-$(CONFIG_XILINX_CDMATEST) += cdmatest.o
+obj-$(CONFIG_XILINX_DPDMA) += xilinx_dpdma.o
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
+xilinx_ps_pcie_dma-objs := xilinx_ps_pcie_main.o xilinx_ps_pcie_platform.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA) += xilinx_ps_pcie_dma.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA_TEST) += xilinx_ps_pcie_dma_client.o
+obj-$(CONFIG_XILINX_FRMBUF) += xilinx_frmbuf.o
diff --git a/drivers/dma/xilinx/axidmatest.c b/drivers/dma/xilinx/axidmatest.c
new file mode 100644
index 000000000000..c819ae4db304
--- /dev/null
+++ b/drivers/dma/xilinx/axidmatest.c
@@ -0,0 +1,664 @@
+/*
+ * XILINX AXI DMA Engine test module
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched/task.h>
+#include <linux/dma/xilinx_dma.h>
+
+static unsigned int test_buf_size = 16384;
+module_param(test_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static unsigned int iterations = 5;
+module_param(iterations, uint, S_IRUGO);
+MODULE_PARM_DESC(iterations,
+ "Iterations before stopping test (default: infinite)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set; all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
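+/*
+ * For example, a source byte inside the copied region at index 5 is
+ * PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK) = 0xda.
+ */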
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+struct dmatest_slave_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+struct dmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/*
+ * These are protected by dma_list_mutex since they're only used by
+ * the DMA filter function callback
+ */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(dmatest_channels);
+static unsigned int nr_channels;
+
+static bool is_threaded_test_run(struct dmatest_chan *tx_dtc,
+ struct dmatest_chan *rx_dtc)
+{
+ struct dmatest_slave_thread *thread;
+ int ret = false;
+
+ list_for_each_entry(thread, &tx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+
+ list_for_each_entry(thread, &rx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+ return ret;
+}
+
+static unsigned long dmatest_random(void)
+{
+ unsigned long buf;
+
+ get_random_bytes(&buf, sizeof(buf));
+ return buf;
+}
+
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warn(
+ "%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn(
+ "%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn(
+ "%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn(
+ "%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i;
+ unsigned int error_count = 0;
+ u8 actual;
+ u8 expected;
+ u8 *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig;
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ if (error_count < 32)
+ dmatest_mismatch(actual, pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+ }
+
+ if (error_count > 32)
+ pr_warn("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+static void dmatest_slave_tx_callback(void *completion)
+{
+ complete(completion);
+}
+
+static void dmatest_slave_rx_callback(void *completion)
+{
+ complete(completion);
+}
+
+/*
+ * Function for slave transfers.
+ * Each thread requires two channels, one for transmit and one for receive.
+ */
+static int dmatest_slave_func(void *data)
+{
+ struct dmatest_slave_thread *thread = data;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ const char *thread_name;
+ unsigned int src_off, dst_off, len;
+ unsigned int error_count;
+ unsigned int failed_tests = 0;
+ unsigned int total_tests = 0;
+ dma_cookie_t tx_cookie;
+ dma_cookie_t rx_cookie;
+ enum dma_status status;
+ enum dma_ctrl_flags flags;
+ int ret;
+ int src_cnt;
+ int dst_cnt;
+ int bd_cnt = 11;
+ int i;
+
+ thread_name = current->comm;
+
+ ret = -ENOMEM;
+
+ smp_rmb();
+ tx_chan = thread->tx_chan;
+ rx_chan = thread->rx_chan;
+ src_cnt = dst_cnt = bd_cnt;
+
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < src_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+ thread->srcs[i] = NULL;
+
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < dst_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+ thread->dsts[i] = NULL;
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
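+ /*
+ * DMA_PREP_INTERRUPT requests a completion callback for each
+ * descriptor; DMA_CTRL_ACK marks the transaction as already
+ * acknowledged, so the engine may recycle the descriptor as
+ * soon as it completes.
+ */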
+
+ while (!kthread_should_stop()
+ && !(iterations && total_tests >= iterations)) {
+ struct dma_device *tx_dev = tx_chan->device;
+ struct dma_device *rx_dev = rx_chan->device;
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct dma_async_tx_descriptor *rxd = NULL;
+ dma_addr_t dma_srcs[src_cnt];
+ dma_addr_t dma_dsts[dst_cnt];
+ struct completion rx_cmp;
+ struct completion tx_cmp;
+ unsigned long rx_tmo =
+ msecs_to_jiffies(300000); /* RX takes longer */
+ unsigned long tx_tmo = msecs_to_jiffies(30000);
+ u8 align = 0;
+ struct scatterlist tx_sg[bd_cnt];
+ struct scatterlist rx_sg[bd_cnt];
+
+ total_tests++;
+
+ /* honor larger alignment restrictions */
+ align = tx_dev->copy_align;
+ if (rx_dev->copy_align > align)
+ align = rx_dev->copy_align;
+
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = dmatest_random() % test_buf_size + 1;
+ len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ src_off = dmatest_random() % (test_buf_size - len + 1);
+ dst_off = dmatest_random() % (test_buf_size - len + 1);
+
+ src_off = (src_off >> align) << align;
+ dst_off = (dst_off >> align) << align;
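+ /*
+ * Example: with an effective copy_align of 3 (8-byte units), a
+ * random len of 1003 rounds down to 1000, and the offsets are
+ * masked the same way, so every transfer obeys the stricter of
+ * the two engines' alignment requirements.
+ */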
+
+ dmatest_init_srcs(thread->srcs, src_off, len);
+ dmatest_init_dsts(thread->dsts, dst_off, len);
+
+ for (i = 0; i < src_cnt; i++) {
+ u8 *buf = thread->srcs[i] + src_off;
+
+ dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
+ DMA_MEM_TO_DEV);
+ }
+
+ for (i = 0; i < dst_cnt; i++) {
+ dma_dsts[i] = dma_map_single(rx_dev->dev,
+ thread->dsts[i],
+ test_buf_size,
+ DMA_MEM_TO_DEV);
+
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_MEM_TO_DEV);
+
+ dma_dsts[i] = dma_map_single(rx_dev->dev,
+ thread->dsts[i],
+ test_buf_size,
+ DMA_DEV_TO_MEM);
+ }
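+ /*
+ * The map/unmap/remap above mirrors the DMA_MEM_TO_MEM trick
+ * noted in cdmatest.c: the DMA_MEM_TO_DEV mapping writes the
+ * freshly initialized pattern back from the CPU cache to
+ * memory, and the final DMA_DEV_TO_MEM mapping invalidates the
+ * cache lines, so the CPU later reads what the hardware
+ * actually wrote rather than stale cached pattern bytes.
+ */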
+
+ sg_init_table(tx_sg, bd_cnt);
+ sg_init_table(rx_sg, bd_cnt);
+
+ for (i = 0; i < bd_cnt; i++) {
+ sg_dma_address(&tx_sg[i]) = dma_srcs[i];
+ sg_dma_address(&rx_sg[i]) = dma_dsts[i] + dst_off;
+
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+ }
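+ /*
+ * Note the asymmetry: dma_srcs[] already points src_off bytes
+ * into each source buffer (the offset was applied before
+ * mapping), whereas each destination buffer was mapped in full,
+ * so dst_off must be added to the scatterlist address here.
+ */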
+
+ rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, bd_cnt,
+ DMA_DEV_TO_MEM, flags, NULL);
+
+ txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, bd_cnt,
+ DMA_MEM_TO_DEV, flags, NULL);
+
+ if (!rxd || !txd) {
+ for (i = 0; i < src_cnt; i++)
+ dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+ DMA_MEM_TO_DEV);
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_DEV_TO_MEM);
+ pr_warn(
+ "%s: #%u: prep error with src_off=0x%x ",
+ thread_name, total_tests - 1, src_off);
+ pr_warn("dst->off=0x%x len=0x%x\n",
+ dst->off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+
+ init_completion(&rx_cmp);
+ rxd->callback = dmatest_slave_rx_callback;
+ rxd->callback_param = &rx_cmp;
+ rx_cookie = rxd->tx_submit(rxd);
+
+ init_completion(&tx_cmp);
+ txd->callback = dmatest_slave_tx_callback;
+ txd->callback_param = &tx_cmp;
+ tx_cookie = txd->tx_submit(txd);
+
+ if (dma_submit_error(rx_cookie) ||
+ dma_submit_error(tx_cookie)) {
+ pr_warn(
+ "%s: #%u: submit error %d/%d with src_off=0x%x ",
+ thread_name, total_tests - 1,
+ rx_cookie, tx_cookie, src_off);
+ pr_warn("dst->off=0x%x len=0x%x\n",
+ dst->off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_issue_pending(tx_chan);
+ dma_async_issue_pending(rx_chan);
+
+ tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
+
+ status = dma_async_is_tx_complete(tx_chan, tx_cookie,
+ NULL, NULL);
+
+ if (tx_tmo == 0) {
+ pr_warn("%s: #%u: tx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: tx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
+ status = dma_async_is_tx_complete(rx_chan, rx_cookie,
+ NULL, NULL);
+
+ if (rx_tmo == 0) {
+ pr_warn("%s: #%u: rx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: rx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ /* Unmap by myself */
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size, DMA_DEV_TO_MEM);
+
+ error_count = 0;
+
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += dmatest_verify(thread->srcs, 0, src_off,
+ 0, PATTERN_SRC, true);
+ error_count += dmatest_verify(thread->srcs, src_off,
+ src_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, true);
+ error_count += dmatest_verify(thread->srcs, src_off + len,
+ test_buf_size, src_off + len,
+ PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += dmatest_verify(thread->dsts, 0, dst_off,
+ 0, PATTERN_DST, false);
+ error_count += dmatest_verify(thread->dsts, dst_off,
+ dst_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, false);
+ error_count += dmatest_verify(thread->dsts, dst_off + len,
+ test_buf_size, dst_off + len,
+ PATTERN_DST, false);
+
+ if (error_count) {
+ pr_warn("%s: #%u: %u errors with ",
+ thread_name, total_tests - 1, error_count);
+ pr_warn("src_off=0x%x dst->off=0x%x len=0x%x\n",
+ src_off, dst->off, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with ",
+ thread_name, total_tests - 1);
+ pr_debug("src_off=0x%x dst->off=0x%x len=0x%x\n",
+ src_off, dst->off, len);
+ }
+ }
+
+ ret = 0;
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
+err_dstbuf:
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
+err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
+ pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+ thread_name, total_tests, failed_tests, ret);
+
+ thread->done = true;
+ wake_up(&thread_wait);
+
+ return ret;
+}
+
+static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
+{
+ struct dmatest_slave_thread *thread;
+ struct dmatest_slave_thread *_thread;
+ int ret;
+
+ list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
+ ret = kthread_stop(thread->task);
+ pr_debug("dmatest: thread %s exited with status %d\n",
+ thread->task->comm, ret);
+ list_del(&thread->node);
+ put_task_struct(thread->task);
+ kfree(thread);
+ }
+ kfree(dtc);
+}
+
+static int dmatest_add_slave_threads(struct dmatest_chan *tx_dtc,
+ struct dmatest_chan *rx_dtc)
+{
+ struct dmatest_slave_thread *thread;
+ struct dma_chan *tx_chan = tx_dtc->chan;
+ struct dma_chan *rx_chan = rx_dtc->chan;
+
+ thread = kzalloc(sizeof(struct dmatest_slave_thread), GFP_KERNEL);
+ if (!thread) {
+ pr_warn("dmatest: No memory for slave thread %s-%s\n",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ return -ENOMEM;
+ }
+
+ thread->tx_chan = tx_chan;
+ thread->rx_chan = rx_chan;
+ thread->type = (enum dma_transaction_type)DMA_SLAVE;
+ smp_wmb();
+ thread->task = kthread_run(dmatest_slave_func, thread, "%s-%s",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ if (IS_ERR(thread->task)) {
+ pr_warn("dmatest: Failed to run thread %s-%s\n",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ kfree(thread);
+ return PTR_ERR(thread->task);
+ }
+
+ /* srcbuf and dstbuf are allocated by the thread itself */
+ get_task_struct(thread->task);
+ list_add_tail(&thread->node, &tx_dtc->threads);
+
+ /* Added one thread with 2 channels */
+ return 1;
+}
+
+static int dmatest_add_slave_channels(struct dma_chan *tx_chan,
+ struct dma_chan *rx_chan)
+{
+ struct dmatest_chan *tx_dtc;
+ struct dmatest_chan *rx_dtc;
+ unsigned int thread_count = 0;
+
+ tx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+ if (!tx_dtc) {
+ pr_warn("dmatest: No memory for tx %s\n",
+ dma_chan_name(tx_chan));
+ return -ENOMEM;
+ }
+
+ rx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+ if (!rx_dtc) {
+ pr_warn("dmatest: No memory for rx %s\n",
+ dma_chan_name(rx_chan));
+ kfree(tx_dtc);
+ return -ENOMEM;
+ }
+
+ tx_dtc->chan = tx_chan;
+ rx_dtc->chan = rx_chan;
+ INIT_LIST_HEAD(&tx_dtc->threads);
+ INIT_LIST_HEAD(&rx_dtc->threads);
+
+ dmatest_add_slave_threads(tx_dtc, rx_dtc);
+ thread_count += 1;
+
+ pr_info("dmatest: Started %u threads using %s %s\n",
+ thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+
+ list_add_tail(&tx_dtc->node, &dmatest_channels);
+ list_add_tail(&rx_dtc->node, &dmatest_channels);
+ nr_channels += 2;
+
+ if (iterations)
+ wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
+
+ return 0;
+}
+
+static int xilinx_axidmatest_probe(struct platform_device *pdev)
+{
+ struct dma_chan *chan, *rx_chan;
+ int err;
+
+ chan = dma_request_slave_channel(&pdev->dev, "axidma0");
+ if (IS_ERR(chan)) {
+ pr_err("xilinx_dmatest: No Tx channel\n");
+ return PTR_ERR(chan);
+ }
+
+ rx_chan = dma_request_slave_channel(&pdev->dev, "axidma1");
+ if (IS_ERR(rx_chan)) {
+ err = PTR_ERR(rx_chan);
+ pr_err("xilinx_dmatest: No Rx channel\n");
+ goto free_tx;
+ }
+
+ err = dmatest_add_slave_channels(chan, rx_chan);
+ if (err) {
+ pr_err("xilinx_dmatest: Unable to add channels\n");
+ goto free_rx;
+ }
+
+ return 0;
+
+free_rx:
+ dma_release_channel(rx_chan);
+free_tx:
+ dma_release_channel(chan);
+
+ return err;
+}
+
+static int xilinx_axidmatest_remove(struct platform_device *pdev)
+{
+ struct dmatest_chan *dtc, *_dtc;
+ struct dma_chan *chan;
+
+ list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+ list_del(&dtc->node);
+ chan = dtc->chan;
+ dmatest_cleanup_channel(dtc);
+ pr_info("xilinx_dmatest: dropped channel %s\n",
+ dma_chan_name(chan));
+ dmaengine_terminate_all(chan);
+ dma_release_channel(chan);
+ }
+ return 0;
+}
+
+static const struct of_device_id xilinx_axidmatest_of_ids[] = {
+ { .compatible = "xlnx,axi-dma-test-1.00.a",},
+ {}
+};
+
+static struct platform_driver xilinx_axidmatest_driver = {
+ .driver = {
+ .name = "xilinx_axidmatest",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_axidmatest_of_ids,
+ },
+ .probe = xilinx_axidmatest_probe,
+ .remove = xilinx_axidmatest_remove,
+};
+
+static int __init axidma_init(void)
+{
+ return platform_driver_register(&xilinx_axidmatest_driver);
+}
+late_initcall(axidma_init);
+
+static void __exit axidma_exit(void)
+{
+ platform_driver_unregister(&xilinx_axidmatest_driver);
+}
+module_exit(axidma_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI DMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/cdmatest.c b/drivers/dma/xilinx/cdmatest.c
new file mode 100644
index 000000000000..147679353ee5
--- /dev/null
+++ b/drivers/dma/xilinx/cdmatest.c
@@ -0,0 +1,662 @@
+/*
+ * XILINX CDMA Engine test module
+ *
+ * Copyright (C) 2012 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/sched/task.h>
+#include <linux/dma/xilinx_dma.h>
+
+static unsigned int test_buf_size = 64;
+module_param(test_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static char test_channel[20];
+module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
+
+static char test_device[20];
+module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
+
+static unsigned int threads_per_chan = 1;
+module_param(threads_per_chan, uint, S_IRUGO);
+MODULE_PARM_DESC(threads_per_chan,
+ "Number of threads to start per channel (default: 1)");
+
+static unsigned int max_channels;
+module_param(max_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(max_channels,
+ "Maximum number of channels to use (default: all)");
+
+static unsigned int iterations = 5;
+module_param(iterations, uint, S_IRUGO);
+MODULE_PARM_DESC(iterations,
+ "Iterations before stopping test (default: infinite)");
+
+static unsigned int xor_sources = 3;
+module_param(xor_sources, uint, S_IRUGO);
+MODULE_PARM_DESC(xor_sources,
+ "Number of xor source buffers (default: 3)");
+
+static unsigned int pq_sources = 3;
+module_param(pq_sources, uint, S_IRUGO);
+MODULE_PARM_DESC(pq_sources,
+ "Number of p+q source buffers (default: 3)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set, and all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+struct cdmatest_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+struct cdmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/* Global test state: registered channels and their test threads */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(cdmatest_channels);
+static unsigned int nr_channels;
+
+static bool is_threaded_test_run(struct cdmatest_chan *tx_dtc)
+{
+ struct cdmatest_thread *thread;
+
+ list_for_each_entry(thread, &tx_dtc->threads, node) {
+ if (!thread->done)
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned long cdmatest_random(void)
+{
+ unsigned long buf;
+
+ get_random_bytes(&buf, sizeof(buf));
+ return buf;
+}
+
+static void cdmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void cdmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void cdmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warn(
+ "%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn(
+ "%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn(
+ "%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn(
+ "%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int cdmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i;
+ unsigned int error_count = 0;
+ u8 actual;
+ u8 expected;
+ u8 *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig;
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ if (error_count < 32)
+ cdmatest_mismatch(actual, pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+ }
+
+ if (error_count > 32)
+ pr_warn("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+static void cdmatest_callback(void *completion)
+{
+ complete(completion);
+}
+
+/*
+ * This function repeatedly tests DMA transfers of various lengths and
+ * offsets for a given operation type until it is told to exit by
+ * kthread_stop(). There may be multiple threads running this function
+ * in parallel for a single channel, and there may be multiple channels
+ * being tested in parallel.
+ *
+ * Before each test, the source and destination buffer is initialized
+ * with a known pattern. This pattern is different depending on
+ * whether it's in an area which is supposed to be copied or
+ * overwritten, and different in the source and destination buffers.
+ * So if the DMA engine doesn't copy exactly what we tell it to copy,
+ * we'll notice.
+ */
+static int cdmatest_func(void *data)
+{
+ struct cdmatest_thread *thread = data;
+ struct dma_chan *chan;
+ const char *thread_name;
+ unsigned int src_off, dst_off, len;
+ unsigned int error_count;
+ unsigned int failed_tests = 0;
+ unsigned int total_tests = 0;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ enum dma_ctrl_flags flags;
+ u8 pq_coefs[pq_sources + 1];
+ int ret;
+ int src_cnt;
+ int dst_cnt;
+ int i;
+
+ thread_name = current->comm;
+
+ ret = -ENOMEM;
+
+ /* Limit testing scope here */
+
+ smp_rmb();
+ chan = thread->chan;
+ if (thread->type == DMA_MEMCPY) {
+ src_cnt = dst_cnt = 1;
+ } else if (thread->type == DMA_XOR) {
+ /* force odd to ensure dst = src */
+ src_cnt = xor_sources | 1;
+ dst_cnt = 1;
+ } else if (thread->type == DMA_PQ) {
+ /* force odd to ensure dst = src */
+ src_cnt = pq_sources | 1;
+ dst_cnt = 2;
+ for (i = 0; i < src_cnt; i++)
+ pq_coefs[i] = 1;
+ } else {
+ goto err_srcs;
+ }
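+
+ /*
+ * ORing the source count with 1 forces it odd (e.g. 4 becomes 5):
+ * the XOR of an odd number of identical source buffers equals a
+ * single source, so the destination can be verified against the
+ * same pattern as a plain copy.
+ */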
+
+ thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < src_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+ thread->srcs[i] = NULL;
+
+ thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < dst_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+ thread->dsts[i] = NULL;
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ while (!kthread_should_stop()
+ && !(iterations && total_tests >= iterations)) {
+ struct dma_device *dev = chan->device;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_addr_t dma_srcs[src_cnt];
+ dma_addr_t dma_dsts[dst_cnt];
+ struct completion cmp;
+ unsigned long tmo = msecs_to_jiffies(3000);
+ u8 align = 0;
+
+ total_tests++;
+
+ /* honor alignment restrictions */
+ if (thread->type == DMA_MEMCPY)
+ align = dev->copy_align;
+ else if (thread->type == DMA_XOR)
+ align = dev->xor_align;
+ else if (thread->type == DMA_PQ)
+ align = dev->pq_align;
+
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = cdmatest_random() % test_buf_size + 1;
+ len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ src_off = cdmatest_random() % (test_buf_size - len + 1);
+ dst_off = cdmatest_random() % (test_buf_size - len + 1);
+
+ src_off = (src_off >> align) << align;
+ dst_off = (dst_off >> align) << align;
+
+ cdmatest_init_srcs(thread->srcs, src_off, len);
+ cdmatest_init_dsts(thread->dsts, dst_off, len);
+
+ for (i = 0; i < src_cnt; i++) {
+ u8 *buf = thread->srcs[i] + src_off;
+
+ dma_srcs[i] = dma_map_single(dev->dev, buf, len,
+ DMA_MEM_TO_DEV);
+ }
+ /* map with DMA_MEM_TO_MEM to force writeback/invalidate */
+ for (i = 0; i < dst_cnt; i++) {
+ dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
+ test_buf_size,
+ DMA_MEM_TO_MEM);
+ }
+
+ if (thread->type == DMA_MEMCPY) {
+ tx = dev->device_prep_dma_memcpy(chan,
+ dma_dsts[0] + dst_off,
+ dma_srcs[0], len,
+ flags);
+ } else if (thread->type == DMA_XOR) {
+ tx = dev->device_prep_dma_xor(chan,
+ dma_dsts[0] + dst_off,
+ dma_srcs, src_cnt,
+ len, flags);
+ } else if (thread->type == DMA_PQ) {
+ dma_addr_t dma_pq[dst_cnt];
+
+ for (i = 0; i < dst_cnt; i++)
+ dma_pq[i] = dma_dsts[i] + dst_off;
+ tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
+ src_cnt, pq_coefs,
+ len, flags);
+ }
+
+ if (!tx) {
+ for (i = 0; i < src_cnt; i++)
+ dma_unmap_single(dev->dev, dma_srcs[i], len,
+ DMA_MEM_TO_DEV);
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_MEM_TO_MEM);
+ pr_warn(
+ "%s: #%u: prep error with src_off=0x%x ",
+ thread_name, total_tests - 1, src_off);
+ pr_warn("dst->off=0x%x len=0x%x\n",
+ dst->off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+
+ init_completion(&cmp);
+ tx->callback = cdmatest_callback;
+ tx->callback_param = &cmp;
+ cookie = tx->tx_submit(tx);
+
+ if (dma_submit_error(cookie)) {
+ pr_warn(
+ "%s: #%u: submit error %d with src_off=0x%x ",
+ thread_name, total_tests - 1,
+ cookie, src_off);
+ pr_warn("dst->off=0x%x len=0x%x\n",
+ dst->off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_issue_pending(chan);
+
+ tmo = wait_for_completion_timeout(&cmp, tmo);
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+
+ if (tmo == 0) {
+ pr_warn("%s: #%u: test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ /* Unmap by myself */
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
+ DMA_MEM_TO_MEM);
+
+ error_count = 0;
+
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += cdmatest_verify(thread->srcs, 0, src_off,
+ 0, PATTERN_SRC, true);
+ error_count += cdmatest_verify(thread->srcs, src_off,
+ src_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, true);
+ error_count += cdmatest_verify(thread->srcs, src_off + len,
+ test_buf_size, src_off + len,
+ PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += cdmatest_verify(thread->dsts, 0, dst_off,
+ 0, PATTERN_DST, false);
+ error_count += cdmatest_verify(thread->dsts, dst_off,
+ dst_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, false);
+ error_count += cdmatest_verify(thread->dsts, dst_off + len,
+ test_buf_size, dst_off + len,
+ PATTERN_DST, false);
+
+ if (error_count) {
+ pr_warn("%s: #%u: %u errors with ",
+ thread_name, total_tests - 1, error_count);
+ pr_warn("src_off=0x%x dst->off=0x%x len=0x%x\n",
+ src_off, dst->off, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with ",
+ thread_name, total_tests - 1);
+ pr_debug("src_off=0x%x dst->off=0x%x len=0x%x\n",
+ src_off, dst->off, len);
+ }
+ }
+
+ ret = 0;
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
+err_dstbuf:
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
+err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
+ pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+ thread_name, total_tests, failed_tests, ret);
+
+ thread->done = true;
+ wake_up(&thread_wait);
+
+ return ret;
+}
+
+static void cdmatest_cleanup_channel(struct cdmatest_chan *dtc)
+{
+ struct cdmatest_thread *thread;
+ struct cdmatest_thread *_thread;
+ int ret;
+
+ list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
+ ret = kthread_stop(thread->task);
+ pr_debug("cdmatest: thread %s exited with status %d\n",
+ thread->task->comm, ret);
+ list_del(&thread->node);
+ put_task_struct(thread->task);
+ kfree(thread);
+ }
+ kfree(dtc);
+}
+
+static int cdmatest_add_threads(struct cdmatest_chan *dtc,
+ enum dma_transaction_type type)
+{
+ struct cdmatest_thread *thread;
+ struct dma_chan *chan = dtc->chan;
+ char *op;
+ unsigned int i;
+
+ if (type == DMA_MEMCPY)
+ op = "copy";
+ else if (type == DMA_XOR)
+ op = "xor";
+ else if (type == DMA_PQ)
+ op = "pq";
+ else
+ return -EINVAL;
+
+ for (i = 0; i < threads_per_chan; i++) {
+ thread = kzalloc(sizeof(struct cdmatest_thread), GFP_KERNEL);
+ if (!thread) {
+ pr_warn("cdmatest: No memory for %s-%s%u\n",
+ dma_chan_name(chan), op, i);
+
+ break;
+ }
+ thread->chan = dtc->chan;
+ thread->type = type;
+ smp_wmb();
+ thread->task = kthread_run(cdmatest_func, thread, "%s-%s%u",
+ dma_chan_name(chan), op, i);
+ if (IS_ERR(thread->task)) {
+ pr_warn("cdmatest: Failed to run thread %s-%s%u\n",
+ dma_chan_name(chan), op, i);
+ kfree(thread);
+ break;
+ }
+
+ /* srcbuf and dstbuf are allocated by the thread itself */
+ get_task_struct(thread->task);
+ list_add_tail(&thread->node, &dtc->threads);
+ }
+
+ return i;
+}
+
+static int cdmatest_add_channel(struct dma_chan *chan)
+{
+ struct cdmatest_chan *dtc;
+ struct dma_device *dma_dev = chan->device;
+ unsigned int thread_count = 0;
+ int cnt;
+
+ dtc = kmalloc(sizeof(struct cdmatest_chan), GFP_KERNEL);
+ if (!dtc) {
+ pr_warn("cdmatest: No memory for %s\n", dma_chan_name(chan));
+ return -ENOMEM;
+ }
+
+ dtc->chan = chan;
+ INIT_LIST_HEAD(&dtc->threads);
+
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
+ cnt = cdmatest_add_threads(dtc, DMA_MEMCPY);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ cnt = cdmatest_add_threads(dtc, DMA_XOR);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ cnt = cdmatest_add_threads(dtc, DMA_PQ);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+
+ pr_info("cdmatest: Started %u threads using %s\n",
+ thread_count, dma_chan_name(chan));
+
+ list_add_tail(&dtc->node, &cdmatest_channels);
+ nr_channels++;
+
+ if (iterations)
+ wait_event(thread_wait, !is_threaded_test_run(dtc));
+
+ return 0;
+}
+
+static int xilinx_cdmatest_probe(struct platform_device *pdev)
+{
+ struct dma_chan *chan;
+ int err;
+
+ chan = dma_request_slave_channel(&pdev->dev, "cdma");
+ if (IS_ERR(chan)) {
+ pr_err("xilinx_cdmatest: No channel\n");
+ return PTR_ERR(chan);
+ }
+
+ err = cdmatest_add_channel(chan);
+ if (err) {
+ pr_err("xilinx_cdmatest: Unable to add channel\n");
+ goto free_tx;
+ }
+ return 0;
+
+free_tx:
+ dma_release_channel(chan);
+
+ return err;
+}
+
+static int xilinx_cdmatest_remove(struct platform_device *pdev)
+{
+ struct cdmatest_chan *dtc, *_dtc;
+ struct dma_chan *chan;
+
+ list_for_each_entry_safe(dtc, _dtc, &cdmatest_channels, node) {
+ list_del(&dtc->node);
+ chan = dtc->chan;
+ cdmatest_cleanup_channel(dtc);
+ pr_info("xilinx_cdmatest: dropped channel %s\n",
+ dma_chan_name(chan));
+ dma_release_channel(chan);
+ }
+ return 0;
+}
+
+static const struct of_device_id xilinx_cdmatest_of_ids[] = {
+ { .compatible = "xlnx,axi-cdma-test-1.00.a", },
+ {}
+};
+
+static struct platform_driver xilinx_cdmatest_driver = {
+ .driver = {
+ .name = "xilinx_cdmatest",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_cdmatest_of_ids,
+ },
+ .probe = xilinx_cdmatest_probe,
+ .remove = xilinx_cdmatest_remove,
+};
+
+static int __init cdma_init(void)
+{
+ return platform_driver_register(&xilinx_cdmatest_driver);
+}
+late_initcall(cdma_init);
+
+static void __exit cdma_exit(void)
+{
+ platform_driver_unregister(&xilinx_cdmatest_driver);
+}
+module_exit(cdma_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI CDMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/vdmatest.c b/drivers/dma/xilinx/vdmatest.c
new file mode 100644
index 000000000000..f578e97df8b8
--- /dev/null
+++ b/drivers/dma/xilinx/vdmatest.c
@@ -0,0 +1,662 @@
+/*
+ * XILINX VDMA Engine test client driver
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * Description:
+ * This is a simple Xilinx VDMA test client for AXI VDMA driver.
+ * This test assumes both the channels of VDMA are enabled in the
+ * hardware design and configured in back-to-back connection. Test
+ * starts by pumping the data onto one channel (MM2S) and then
+ * compares the data that is received on the other channel (S2MM).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma/xilinx_dma.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/sched/task.h>
+#include <linux/wait.h>
+
+static unsigned int test_buf_size = 64;
+module_param(test_buf_size, uint, 0444);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0444);
+MODULE_PARM_DESC(iterations,
+ "Iterations before stopping test (default: infinite)");
+
+static unsigned int hsize = 64;
+module_param(hsize, uint, 0444);
+MODULE_PARM_DESC(hsize, "Horizontal size in bytes");
+
+static unsigned int vsize = 32;
+module_param(vsize, uint, 0444);
+MODULE_PARM_DESC(vsize, "Vertical size in lines");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set, and all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+/* Maximum number of frame buffers */
+#define MAX_NUM_FRAMES 32
+
+/**
+ * struct xilinx_vdmatest_slave_thread - VDMA test thread
+ * @node: Thread node
+ * @task: Task structure pointer
+ * @tx_chan: Tx channel pointer
+ * @rx_chan: Rx channel pointer
+ * @srcs: Source buffer
+ * @dsts: Destination buffer
+ * @type: DMA transaction type
+ * @done: Set when the thread has completed all iterations
+ */
+struct xilinx_vdmatest_slave_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+/**
+ * struct xilinx_vdmatest_chan - VDMA test channel
+ * @node: Channel node
+ * @chan: DMA channel pointer
+ * @threads: List of VDMA test threads
+ */
+struct xilinx_vdmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/* Global variables */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(xilinx_vdmatest_channels);
+static unsigned int nr_channels;
+static unsigned int frm_cnt;
+static dma_addr_t dma_srcs[MAX_NUM_FRAMES];
+static dma_addr_t dma_dsts[MAX_NUM_FRAMES];
+static struct dma_interleaved_template xt;
+
+static bool is_threaded_test_run(struct xilinx_vdmatest_chan *tx_dtc,
+ struct xilinx_vdmatest_chan *rx_dtc)
+{
+ struct xilinx_vdmatest_slave_thread *thread;
+ bool ret = false;
+
+ list_for_each_entry(thread, &tx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+
+ list_for_each_entry(thread, &rx_dtc->threads, node) {
+ if (!thread->done)
+ ret = true;
+ }
+ return ret;
+}
+
+static void xilinx_vdmatest_init_srcs(u8 **bufs, unsigned int start,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for (; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for (; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void xilinx_vdmatest_init_dsts(u8 **bufs, unsigned int start,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for (; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for (; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ }
+}
+
+static void xilinx_vdmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warn(
+ "%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warn(
+ "%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warn(
+ "%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warn(
+ "%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int xilinx_vdmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i, error_count = 0;
+ u8 actual, expected, *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig;
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ if (error_count < 32)
+ xilinx_vdmatest_mismatch(actual,
+ pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+ }
+
+ if (error_count > 32)
+ pr_warn("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+static void xilinx_vdmatest_slave_tx_callback(void *completion)
+{
+ pr_debug("Got tx callback\n");
+ complete(completion);
+}
+
+static void xilinx_vdmatest_slave_rx_callback(void *completion)
+{
+ pr_debug("Got rx callback\n");
+ complete(completion);
+}
+
+/*
+ * Function for slave transfers
+ * Each thread requires 2 channels, one for transmit, and one for receive
+ */
+static int xilinx_vdmatest_slave_func(void *data)
+{
+ struct xilinx_vdmatest_slave_thread *thread = data;
+ struct dma_chan *tx_chan, *rx_chan;
+ const char *thread_name;
+ unsigned int len, error_count;
+ unsigned int failed_tests = 0, total_tests = 0;
+ dma_cookie_t tx_cookie = 0, rx_cookie = 0;
+ enum dma_status status;
+ enum dma_ctrl_flags flags;
+ int ret = -ENOMEM, i;
+ struct xilinx_vdma_config config;
+
+ thread_name = current->comm;
+
+ /* Limit testing scope here */
+ test_buf_size = hsize * vsize;
+
+ /*
+ * This barrier ensures 'thread' is initialized and that we get
+ * valid DMA channels.
+ */
+ smp_rmb();
+ tx_chan = thread->tx_chan;
+ rx_chan = thread->rx_chan;
+
+ thread->srcs = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < frm_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+
+ thread->dsts = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < frm_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ while (!kthread_should_stop()
+ && !(iterations && total_tests >= iterations)) {
+ struct dma_device *tx_dev = tx_chan->device;
+ struct dma_device *rx_dev = rx_chan->device;
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct dma_async_tx_descriptor *rxd = NULL;
+ struct completion rx_cmp, tx_cmp;
+ unsigned long rx_tmo = msecs_to_jiffies(30000);
+ unsigned long tx_tmo = msecs_to_jiffies(30000);
+ u8 align = 0;
+
+ total_tests++;
+
+ /* honor larger alignment restrictions */
+ align = tx_dev->copy_align;
+ if (rx_dev->copy_align > align)
+ align = rx_dev->copy_align;
+
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = test_buf_size;
+ xilinx_vdmatest_init_srcs(thread->srcs, 0, len);
+ xilinx_vdmatest_init_dsts(thread->dsts, 0, len);
+
+ /* Zero out configuration */
+ memset(&config, 0, sizeof(struct xilinx_vdma_config));
+
+ /* Set up hardware configuration information */
+ config.frm_cnt_en = 1;
+ config.coalesc = frm_cnt * 10;
+ config.park = 1;
+ xilinx_vdma_channel_set_config(tx_chan, &config);
+
+ xilinx_vdma_channel_set_config(rx_chan, &config);
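+
+ /*
+ * Configuration summary (as understood from the VDMA config
+ * API; exact semantics depend on the xilinx_vdma driver):
+ * frm_cnt_en enables frame-count-based completion, coalesc
+ * sets the interrupt coalescing threshold, and park stops the
+ * channel on a frame instead of free-running through the
+ * frame-store ring.
+ */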
+
+ for (i = 0; i < frm_cnt; i++) {
+ dma_dsts[i] = dma_map_single(rx_dev->dev,
+ thread->dsts[i],
+ test_buf_size,
+ DMA_DEV_TO_MEM);
+
+ if (dma_mapping_error(rx_dev->dev, dma_dsts[i])) {
+ failed_tests++;
+ continue;
+ }
+ xt.dst_start = dma_dsts[i];
+ xt.dir = DMA_DEV_TO_MEM;
+ xt.numf = vsize;
+ xt.sgl[0].size = hsize;
+ xt.sgl[0].icg = 0;
+ xt.frame_size = 1;
+ rxd = rx_dev->device_prep_interleaved_dma(rx_chan,
+ &xt, flags);
+ rx_cookie = rxd->tx_submit(rxd);
+ }
+
+ for (i = 0; i < frm_cnt; i++) {
+ u8 *buf = thread->srcs[i];
+
+ dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
+ DMA_MEM_TO_DEV);
+
+ if (dma_mapping_error(tx_dev->dev, dma_srcs[i])) {
+ failed_tests++;
+ continue;
+ }
+ xt.src_start = dma_srcs[i];
+ xt.dir = DMA_MEM_TO_DEV;
+ xt.numf = vsize;
+ xt.sgl[0].size = hsize;
+ xt.sgl[0].icg = 0;
+ xt.frame_size = 1;
+ txd = tx_dev->device_prep_interleaved_dma(tx_chan,
+ &xt, flags);
+ tx_cookie = txd->tx_submit(txd);
+ }
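+ /*
+ * Each interleaved template describes one frame: numf rows
+ * (vsize), each sgl[0].size bytes wide (hsize), with zero
+ * inter-chunk gap, i.e. one contiguous hsize * vsize buffer
+ * per frame -- matching how test_buf_size is computed above.
+ */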
+
+ if (!rxd || !txd) {
+ for (i = 0; i < frm_cnt; i++)
+ dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+ DMA_MEM_TO_DEV);
+ for (i = 0; i < frm_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_DEV_TO_MEM);
+ pr_warn("%s: #%u: prep error with len=0x%x ",
+ thread_name, total_tests - 1, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+
+ init_completion(&rx_cmp);
+ rxd->callback = xilinx_vdmatest_slave_rx_callback;
+ rxd->callback_param = &rx_cmp;
+
+ init_completion(&tx_cmp);
+ txd->callback = xilinx_vdmatest_slave_tx_callback;
+ txd->callback_param = &tx_cmp;
+
+ if (dma_submit_error(rx_cookie) ||
+ dma_submit_error(tx_cookie)) {
+ pr_warn("%s: #%u: submit error %d/%d with len=0x%x ",
+ thread_name, total_tests - 1,
+ rx_cookie, tx_cookie, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_issue_pending(tx_chan);
+ dma_async_issue_pending(rx_chan);
+
+ tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
+
+ status = dma_async_is_tx_complete(tx_chan, tx_cookie,
+ NULL, NULL);
+
+ if (tx_tmo == 0) {
+ pr_warn("%s: #%u: tx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: tx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
+ status = dma_async_is_tx_complete(rx_chan, rx_cookie,
+ NULL, NULL);
+
+ if (rx_tmo == 0) {
+ pr_warn("%s: #%u: rx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: rx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ /* Unmap by myself */
+ for (i = 0; i < frm_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size, DMA_DEV_TO_MEM);
+
+ error_count = 0;
+
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += xilinx_vdmatest_verify(thread->srcs, 0, 0,
+ 0, PATTERN_SRC, true);
+ error_count += xilinx_vdmatest_verify(thread->srcs, 0,
+ len, 0, PATTERN_SRC | PATTERN_COPY, true);
+ error_count += xilinx_vdmatest_verify(thread->srcs, len,
+ test_buf_size, len, PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += xilinx_vdmatest_verify(thread->dsts, 0, 0,
+ 0, PATTERN_DST, false);
+ error_count += xilinx_vdmatest_verify(thread->dsts, 0,
+ len, 0, PATTERN_SRC | PATTERN_COPY, false);
+ error_count += xilinx_vdmatest_verify(thread->dsts, len,
+ test_buf_size, len, PATTERN_DST, false);
+
+ if (error_count) {
+ pr_warn("%s: #%u: %u errors with len=0x%x\n",
+ thread_name, total_tests - 1, error_count, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with len=0x%x\n",
+ thread_name, total_tests - 1, len);
+ }
+ }
+
+ ret = 0;
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
+err_dstbuf:
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
+err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
+ pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+ thread_name, total_tests, failed_tests, ret);
+
+ thread->done = true;
+ wake_up(&thread_wait);
+
+ return ret;
+}
+
+static void xilinx_vdmatest_cleanup_channel(struct xilinx_vdmatest_chan *dtc)
+{
+ struct xilinx_vdmatest_slave_thread *thread, *_thread;
+ int ret;
+
+ list_for_each_entry_safe(thread, _thread,
+ &dtc->threads, node) {
+ ret = kthread_stop(thread->task);
+ pr_info("xilinx_vdmatest: thread %s exited with status %d\n",
+ thread->task->comm, ret);
+ list_del(&thread->node);
+ put_task_struct(thread->task);
+ kfree(thread);
+ }
+ kfree(dtc);
+}
+
+static int
+xilinx_vdmatest_add_slave_threads(struct xilinx_vdmatest_chan *tx_dtc,
+ struct xilinx_vdmatest_chan *rx_dtc)
+{
+ struct xilinx_vdmatest_slave_thread *thread;
+ struct dma_chan *tx_chan = tx_dtc->chan;
+ struct dma_chan *rx_chan = rx_dtc->chan;
+
+ thread = kzalloc(sizeof(struct xilinx_vdmatest_slave_thread),
+ GFP_KERNEL);
+ if (!thread) {
+ pr_warn("xilinx_vdmatest: No memory for slave thread %s-%s\n",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ return -ENOMEM;
+ }
+
+ thread->tx_chan = tx_chan;
+ thread->rx_chan = rx_chan;
+ thread->type = (enum dma_transaction_type)DMA_SLAVE;
+
+ /*
+ * This barrier ensures the DMA channels in the 'thread'
+ * are initialized.
+ */
+ smp_wmb();
+ thread->task = kthread_run(xilinx_vdmatest_slave_func, thread, "%s-%s",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ if (IS_ERR(thread->task)) {
+ pr_warn("xilinx_vdmatest: Failed to run thread %s-%s\n",
+ dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+ kfree(thread);
+ return PTR_ERR(thread->task);
+ }
+
+ get_task_struct(thread->task);
+ list_add_tail(&thread->node, &tx_dtc->threads);
+
+ /* Added one thread with 2 channels */
+ return 1;
+}
+
+static int xilinx_vdmatest_add_slave_channels(struct dma_chan *tx_chan,
+ struct dma_chan *rx_chan)
+{
+ struct xilinx_vdmatest_chan *tx_dtc, *rx_dtc;
+ unsigned int thread_count = 0;
+
+ tx_dtc = kmalloc(sizeof(struct xilinx_vdmatest_chan), GFP_KERNEL);
+ if (!tx_dtc)
+ return -ENOMEM;
+
+ rx_dtc = kmalloc(sizeof(struct xilinx_vdmatest_chan), GFP_KERNEL);
+ if (!rx_dtc) {
+ kfree(tx_dtc);
+ return -ENOMEM;
+ }
+
+ tx_dtc->chan = tx_chan;
+ rx_dtc->chan = rx_chan;
+ INIT_LIST_HEAD(&tx_dtc->threads);
+ INIT_LIST_HEAD(&rx_dtc->threads);
+
+ xilinx_vdmatest_add_slave_threads(tx_dtc, rx_dtc);
+ thread_count += 1;
+
+ pr_info("xilinx_vdmatest: Started %u threads using %s %s\n",
+ thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+
+ list_add_tail(&tx_dtc->node, &xilinx_vdmatest_channels);
+ list_add_tail(&rx_dtc->node, &xilinx_vdmatest_channels);
+ nr_channels += 2;
+
+ if (iterations)
+ wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
+
+ return 0;
+}
+
+static int xilinx_vdmatest_probe(struct platform_device *pdev)
+{
+ struct dma_chan *chan, *rx_chan;
+ int err;
+
+ err = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,num-fstores", &frm_cnt);
+ if (err < 0) {
+ pr_err("xilinx_vdmatest: missing xlnx,num-fstores property\n");
+ return err;
+ }
+
+ chan = dma_request_slave_channel(&pdev->dev, "vdma0");
+ if (IS_ERR(chan)) {
+ pr_err("xilinx_vdmatest: No Tx channel\n");
+ return PTR_ERR(chan);
+ }
+
+ rx_chan = dma_request_slave_channel(&pdev->dev, "vdma1");
+ if (IS_ERR(rx_chan)) {
+ err = PTR_ERR(rx_chan);
+ pr_err("xilinx_vdmatest: No Rx channel\n");
+ goto free_tx;
+ }
+
+ err = xilinx_vdmatest_add_slave_channels(chan, rx_chan);
+ if (err) {
+ pr_err("xilinx_vdmatest: Unable to add channels\n");
+ goto free_rx;
+ }
+ return 0;
+
+free_rx:
+ dma_release_channel(rx_chan);
+free_tx:
+ dma_release_channel(chan);
+
+ return err;
+}
+
+static int xilinx_vdmatest_remove(struct platform_device *pdev)
+{
+ struct xilinx_vdmatest_chan *dtc, *_dtc;
+ struct dma_chan *chan;
+
+ list_for_each_entry_safe(dtc, _dtc, &xilinx_vdmatest_channels, node) {
+ list_del(&dtc->node);
+ chan = dtc->chan;
+ xilinx_vdmatest_cleanup_channel(dtc);
+ pr_info("xilinx_vdmatest: dropped channel %s\n",
+ dma_chan_name(chan));
+ dmaengine_terminate_async(chan);
+ dma_release_channel(chan);
+ }
+ return 0;
+}
+
+static const struct of_device_id xilinx_vdmatest_of_ids[] = {
+ { .compatible = "xlnx,axi-vdma-test-1.00.a",},
+ {}
+};
+
+static struct platform_driver xilinx_vdmatest_driver = {
+ .driver = {
+ .name = "xilinx_vdmatest",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_vdmatest_of_ids,
+ },
+ .probe = xilinx_vdmatest_probe,
+ .remove = xilinx_vdmatest_remove,
+};
+
+module_platform_driver(xilinx_vdmatest_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI VDMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 2ee86ca5ca5a..1071342a8ab3 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -119,7 +119,7 @@
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
/* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -173,18 +173,6 @@
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
-/* Multi-Channel DMA Descriptor offsets*/
-#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
-#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
-
-/* Multi-Channel DMA Masks/Shifts */
-#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
-#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
-#define XILINX_DMA_BD_STRIDE_SHIFT 0
-#define XILINX_DMA_BD_VSIZE_SHIFT 19
-
/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -221,8 +209,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,8 +220,8 @@ struct xilinx_axidma_desc_hw {
u32 next_desc_msb;
u32 buf_addr;
u32 buf_addr_msb;
- u32 mcdma_control;
- u32 vsize_stride;
+ u32 reserved1;
+ u32 reserved2;
u32 control;
u32 status;
u32 app[XILINX_DMA_NUM_APP_WORDS];
@@ -346,7 +334,6 @@ struct xilinx_dma_tx_descriptor {
* @cyclic_seg_p: Physical allocated segments base for cyclic dma
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
- * @tdest: TDEST value for mcdma
* @has_vflip: S2MM vertical flip
*/
struct xilinx_dma_chan {
@@ -383,7 +370,6 @@ struct xilinx_dma_chan {
dma_addr_t cyclic_seg_p;
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
- u16 tdest;
bool has_vflip;
};
@@ -414,7 +400,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
* @pdev: Platform device structure pointer
@@ -433,7 +418,6 @@ struct xilinx_dma_device {
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -1281,10 +1265,10 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
- if (list_empty(&chan->pending_list))
+ if (!chan->idle)
return;
- if (!chan->idle)
+ if (list_empty(&chan->pending_list))
return;
head_desc = list_first_entry(&chan->pending_list,
@@ -1303,53 +1287,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg && !chan->xdev->mcdma)
+ if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
- if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_CDESC(chan->tdest),
- head_desc->async_tx.phys);
- }
- }
- }
-
xilinx_dma_start(chan);
if (chan->err)
return;
/* Start the transfer */
- if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->has_sg) {
if (chan->cyclic)
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
chan->cyclic_seg_v->phys);
else
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
- } else if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_TDESC(chan->tdest),
- tail_segment->phys);
- }
- }
} else {
struct xilinx_axidma_tx_segment *segment;
struct xilinx_axidma_desc_hw *hw;
@@ -1764,6 +1718,116 @@ error:
}
/**
+ * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_cdma_desc_hw *hw;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+
+ if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
+ return NULL;
+
+ if (unlikely(dst_sg == NULL || src_sg == NULL))
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+ /*
+ * loop until there is either no more source or no more destination
+ * scatterlist entry
+ */
+ while (true) {
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, chan->xdev->max_buffer_len);
+ if (len == 0)
+ goto fetch;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ dst_avail -= len;
+ src_avail -= len;
+ list_add_tail(&segment->node, &desc->segments);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
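+/*
+ * Illustrative sketch (not part of this patch): with the DMA_SG capability
+ * set below in probe, a client could request a CDMA scatter/gather copy
+ * through the generic dmaengine API. "chan", "dst_sgl"/"src_sgl" and their
+ * entry counts are hypothetical and must already be DMA-mapped:
+ *
+ * tx = chan->device->device_prep_dma_sg(chan, dst_sgl, dst_nents,
+ * src_sgl, src_nents, 0);
+ * if (tx) {
+ * cookie = dmaengine_submit(tx);
+ * dma_async_issue_pending(chan);
+ * }
+ */
+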
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -1970,90 +2034,6 @@ error:
}
/**
- * xilinx_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
- * @dchan: DMA channel
- * @xt: Interleaved template pointer
- * @flags: transfer ack flags
- *
- * Return: Async transaction descriptor on success and NULL on failure
- */
-static struct dma_async_tx_descriptor *
-xilinx_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
-{
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
-
- if (!is_slave_direction(xt->dir))
- return NULL;
-
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
-
- if (xt->frame_size != 1)
- return NULL;
-
- /* Allocate a transaction descriptor. */
- desc = xilinx_dma_alloc_tx_descriptor(chan);
- if (!desc)
- return NULL;
-
- chan->direction = xt->dir;
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
-
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
-
- hw = &segment->hw;
-
- /* Fill in the descriptor */
- if (xt->dir != DMA_MEM_TO_DEV)
- hw->buf_addr = xt->dst_start;
- else
- hw->buf_addr = xt->src_start;
-
- hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
- hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
- XILINX_DMA_BD_VSIZE_MASK;
- hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
- XILINX_DMA_BD_STRIDE_MASK;
- hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
-
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
-
-
- segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
- desc->async_tx.phys = segment->phys;
-
- /* For the last DMA_MEM_TO_DEV transfer, set EOP */
- if (xt->dir == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_DMA_BD_SOP;
- segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
- node);
- segment->hw.control |= XILINX_DMA_BD_EOP;
- }
-
- return &desc->async_tx;
-
-error:
- xilinx_dma_free_tx_descriptor(chan, desc);
- return NULL;
-}
-
-/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
* @dchan: Driver specific DMA Channel pointer
*
@@ -2065,16 +2045,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
u32 reg;
int err;
- if (chan->cyclic)
- xilinx_dma_chan_reset(chan);
-
- err = chan->stop_transfer(chan);
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
+ if (!chan->cyclic) {
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan,
+ XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
}
+ xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
chan->idle = true;
@@ -2440,7 +2421,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
chan->direction = DMA_MEM_TO_DEV;
chan->id = chan_id;
- chan->tdest = chan_id;
+ xdev->common.directions = BIT(DMA_MEM_TO_DEV);
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2457,7 +2438,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
- chan->tdest = chan_id - xdev->nr_channels;
+ xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
if (chan->has_vflip) {
@@ -2545,11 +2526,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
-
- ret = of_property_read_u32(node, "dma-channels", &nr_channels);
- if ((ret < 0) && xdev->mcdma)
- dev_warn(xdev->dev, "missing dma-channels property\n");
+ int i, nr_channels = 1;
for (i = 0; i < nr_channels; i++)
xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
@@ -2650,7 +2627,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
if (!of_property_read_u32(node, "xlnx,sg-length-width",
&len_width)) {
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2703,6 +2679,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
}
+ xdev->common.dst_addr_widths = BIT(addr_width / 8);
+ xdev->common.src_addr_widths = BIT(addr_width / 8);
xdev->common.device_alloc_chan_resources =
xilinx_dma_alloc_chan_resources;
xdev->common.device_free_chan_resources =
@@ -2715,14 +2693,14 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
- xdev->common.device_prep_interleaved_dma =
- xilinx_dma_prep_interleaved;
/* Residue calculation is supported by only AXI DMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ dma_cap_set(DMA_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
} else {
xdev->common.device_prep_interleaved_dma =
xilinx_vdma_dma_prep_interleaved;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
new file mode 100644
index 000000000000..89ade893f51c
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -0,0 +1,2322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP DPDMA Engine driver
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "../dmaengine.h"
+
+/* DPDMA registers */
+#define XILINX_DPDMA_ERR_CTRL 0x0
+#define XILINX_DPDMA_ISR 0x4
+#define XILINX_DPDMA_IMR 0x8
+#define XILINX_DPDMA_IEN 0xc
+#define XILINX_DPDMA_IDS 0x10
+#define XILINX_DPDMA_INTR_DESC_DONE_MASK (0x3f << 0)
+#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT 0
+#define XILINX_DPDMA_INTR_NO_OSTAND_MASK (0x3f << 6)
+#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT 6
+#define XILINX_DPDMA_INTR_AXI_ERR_MASK (0x3f << 12)
+#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT 12
+#define XILINX_DPDMA_INTR_DESC_ERR_MASK (0x3f << 18)
+#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT 18
+#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
+#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
+#define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
+#define XILINX_DPDMA_INTR_VSYNC BIT(27)
+#define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x41000
+#define XILINX_DPDMA_INTR_CHAN_ERR 0xfff000
+#define XILINX_DPDMA_INTR_GLOBAL_ERR 0x7000000
+#define XILINX_DPDMA_INTR_ERR_ALL 0x7fff000
+#define XILINX_DPDMA_INTR_CHAN_MASK 0x41041
+#define XILINX_DPDMA_INTR_GLOBAL_MASK 0xf000000
+#define XILINX_DPDMA_INTR_ALL 0xfffffff
+#define XILINX_DPDMA_EISR 0x14
+#define XILINX_DPDMA_EIMR 0x18
+#define XILINX_DPDMA_EIEN 0x1c
+#define XILINX_DPDMA_EIDS 0x20
+#define XILINX_DPDMA_EINTR_INV_APB BIT(0)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK (0x3f << 1)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT 1
+#define XILINX_DPDMA_EINTR_PRE_ERR_MASK (0x3f << 7)
+#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT 7
+#define XILINX_DPDMA_EINTR_CRC_ERR_MASK (0x3f << 13)
+#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT 13
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK (0x3f << 19)
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT 19
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK (0x3f << 25)
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT 25
+#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(31)
+#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x2082082
+#define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
+#define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
+#define XILINX_DPDMA_EINTR_ALL 0xffffffff
+#define XILINX_DPDMA_CNTL 0x100
+#define XILINX_DPDMA_GBL 0x104
+#define XILINX_DPDMA_GBL_TRIG_SHIFT 0
+#define XILINX_DPDMA_GBL_RETRIG_SHIFT 6
+#define XILINX_DPDMA_ALC0_CNTL 0x108
+#define XILINX_DPDMA_ALC0_STATUS 0x10c
+#define XILINX_DPDMA_ALC0_MAX 0x110
+#define XILINX_DPDMA_ALC0_MIN 0x114
+#define XILINX_DPDMA_ALC0_ACC 0x118
+#define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
+#define XILINX_DPDMA_ALC1_CNTL 0x120
+#define XILINX_DPDMA_ALC1_STATUS 0x124
+#define XILINX_DPDMA_ALC1_MAX 0x128
+#define XILINX_DPDMA_ALC1_MIN 0x12c
+#define XILINX_DPDMA_ALC1_ACC 0x130
+#define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
+
+/* Channel register */
+#define XILINX_DPDMA_CH_BASE 0x200
+#define XILINX_DPDMA_CH_OFFSET 0x100
+#define XILINX_DPDMA_CH_DESC_START_ADDRE 0x0
+#define XILINX_DPDMA_CH_DESC_START_ADDR 0x4
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x8
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0xc
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x10
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x14
+#define XILINX_DPDMA_CH_CNTL 0x18
+#define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
+#define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT 2
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT 6
+#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT 10
+#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
+#define XILINX_DPDMA_CH_STATUS 0x1c
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK (0xf << 21)
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT 21
+#define XILINX_DPDMA_CH_VDO 0x20
+#define XILINX_DPDMA_CH_PYLD_SZ 0x24
+#define XILINX_DPDMA_CH_DESC_ID 0x28
+
+/* DPDMA descriptor fields */
+#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE (0xa5)
+#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
+#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
+#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
+#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
+#define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
+#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
+#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
+#define XILINX_DPDMA_DESC_ID_MASK (0xffff << 0)
+#define XILINX_DPDMA_DESC_ID_SHIFT (0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK (0x3ffff << 0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT (0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK (0x3fff << 18)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT (18)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK (0xfff)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT (16)
+
+#define XILINX_DPDMA_ALIGN_BYTES 256
+#define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128
+
+#define XILINX_DPDMA_NUM_CHAN 6
+#define XILINX_DPDMA_PAGE_MASK ((1 << 12) - 1)
+#define XILINX_DPDMA_PAGE_SHIFT 12
+
+/**
+ * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
+ * @control: control configuration field
+ * @desc_id: descriptor ID
+ * @xfer_size: transfer size
+ * @hsize_stride: horizontal size and stride
+ * @timestamp_lsb: LSB of time stamp
+ * @timestamp_msb: MSB of time stamp
+ * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
+ * @next_desc: next descriptor 32-bit address
+ * @src_addr: payload source address (lower 32 bits of 1st 4KB page)
+ * @addr_ext_23: upper 16 bits of the 48-bit addresses (src_addr2 and src_addr3)
+ * @addr_ext_45: upper 16 bits of the 48-bit addresses (src_addr4 and src_addr5)
+ * @src_addr2: payload source address (lower 32 bits of 2nd 4KB page)
+ * @src_addr3: payload source address (lower 32 bits of 3rd 4KB page)
+ * @src_addr4: payload source address (lower 32 bits of 4th 4KB page)
+ * @src_addr5: payload source address (lower 32 bits of 5th 4KB page)
+ * @crc: descriptor CRC
+ */
+struct xilinx_dpdma_hw_desc {
+ u32 control;
+ u32 desc_id;
+ u32 xfer_size;
+ u32 hsize_stride;
+ u32 timestamp_lsb;
+ u32 timestamp_msb;
+ u32 addr_ext;
+ u32 next_desc;
+ u32 src_addr;
+ u32 addr_ext_23;
+ u32 addr_ext_45;
+ u32 src_addr2;
+ u32 src_addr3;
+ u32 src_addr4;
+ u32 src_addr5;
+ u32 crc;
+} __aligned(XILINX_DPDMA_ALIGN_BYTES);
+
+/**
+ * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
+ * @hw: DPDMA hardware descriptor
+ * @node: list node for software descriptors
+ * @phys: physical address of the software descriptor
+ */
+struct xilinx_dpdma_sw_desc {
+ struct xilinx_dpdma_hw_desc hw;
+ struct list_head node;
+ dma_addr_t phys;
+};
+
+/**
+ * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
+ * @PREPARED: descriptor is prepared for transaction
+ * @ACTIVE: transaction is active or has completed successfully
+ * @ERRORED: descriptor encountered one or more errors
+ */
+enum xilinx_dpdma_tx_desc_status {
+ PREPARED,
+ ACTIVE,
+ ERRORED
+};
+
+/**
+ * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
+ * @async_tx: DMA async transaction descriptor
+ * @descriptors: list of software descriptors
+ * @node: list node for transaction descriptors
+ * @status: tx descriptor status
+ * @done_cnt: number of completion notifications to deliver
+ */
+struct xilinx_dpdma_tx_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head descriptors;
+ struct list_head node;
+ enum xilinx_dpdma_tx_desc_status status;
+ unsigned int done_cnt;
+};
+
+/**
+ * enum xilinx_dpdma_chan_id - DPDMA channel ID
+ * @VIDEO0: 1st video channel
+ * @VIDEO1: 2nd video channel, for multi-plane YUV formats
+ * @VIDEO2: 3rd video channel, for multi-plane YUV formats
+ * @GRAPHICS: graphics channel
+ * @AUDIO0: 1st audio channel
+ * @AUDIO1: 2nd audio channel
+ */
+enum xilinx_dpdma_chan_id {
+ VIDEO0,
+ VIDEO1,
+ VIDEO2,
+ GRAPHICS,
+ AUDIO0,
+ AUDIO1
+};
+
+/**
+ * enum xilinx_dpdma_chan_status - DPDMA channel status
+ * @IDLE: idle state
+ * @STREAMING: actively streaming state
+ */
+enum xilinx_dpdma_chan_status {
+ IDLE,
+ STREAMING
+};
+
+/*
+ * DPDMA descriptor placement
+ * --------------------------
+ * The DPDMA descriptor lifetime is described with the following placements:
+ *
+ * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
+ *
+ * Transitions are triggered as follows:
+ *
+ * -> allocated_desc : a descriptor allocation
+ * allocated_desc -> submitted_desc: a descriptor submission
+ * submitted_desc -> pending_desc: a request to issue a pending descriptor
+ * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
+ * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
+ */
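+
+/*
+ * Illustrative mapping to the dmaengine client API (a sketch, not part of
+ * this patch); "chan", "sgl" and "sg_len" are hypothetical:
+ *
+ * tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV, 0);
+ * -> allocated_desc
+ * cookie = dmaengine_submit(tx); -> submitted_desc
+ * dma_async_issue_pending(chan); -> pending_desc
+ *
+ * VSYNC interrupts then advance the descriptor to active_desc and, once
+ * superseded by a new one, to done_list.
+ */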
+
+/**
+ * struct xilinx_dpdma_chan - DPDMA channel
+ * @common: generic dma channel structure
+ * @reg: register base address
+ * @id: channel ID
+ * @wait_to_stop: queue to wait for outstanding transactions before stopping
+ * @status: channel status
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+ * @lock: lock to access struct xilinx_dpdma_chan
+ * @desc_pool: descriptor allocation pool
+ * @done_task: done IRQ bottom half handler
+ * @err_task: error IRQ bottom half handler
+ * @allocated_desc: allocated descriptor
+ * @submitted_desc: submitted descriptor
+ * @pending_desc: pending descriptor to be scheduled in next period
+ * @active_desc: descriptor that the DPDMA channel is active on
+ * @done_list: done descriptor list
+ * @xdev: DPDMA device
+ */
+struct xilinx_dpdma_chan {
+ struct dma_chan common;
+ void __iomem *reg;
+ enum xilinx_dpdma_chan_id id;
+
+ wait_queue_head_t wait_to_stop;
+ enum xilinx_dpdma_chan_status status;
+ bool first_frame;
+ bool video_group;
+
+ spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
+ struct dma_pool *desc_pool;
+ struct tasklet_struct done_task;
+ struct tasklet_struct err_task;
+
+ struct xilinx_dpdma_tx_desc *allocated_desc;
+ struct xilinx_dpdma_tx_desc *submitted_desc;
+ struct xilinx_dpdma_tx_desc *pending_desc;
+ struct xilinx_dpdma_tx_desc *active_desc;
+ struct list_head done_list;
+
+ struct xilinx_dpdma_device *xdev;
+};
+
+/**
+ * struct xilinx_dpdma_device - DPDMA device
+ * @common: generic dma device structure
+ * @reg: register base address
+ * @dev: generic device structure
+ * @axi_clk: axi clock
+ * @chan: DPDMA channels
+ * @ext_addr: flag for 64 bit system (48 bit addressing)
+ * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
+ */
+struct xilinx_dpdma_device {
+ struct dma_device common;
+ void __iomem *reg;
+ struct device *dev;
+
+ struct clk *axi_clk;
+ struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
+
+ bool ext_addr;
+ void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[], unsigned int num_src_addr);
+};
+
+#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+ DPDMA_TC_INTR_DONE,
+ DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+ enum xilinx_dpdma_testcases testcase;
+ u16 xilinx_dpdma_intr_done_count;
+ enum xilinx_dpdma_chan_id chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+
+struct xilinx_dpdma_debugfs_request {
+ const char *req;
+ enum xilinx_dpdma_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+ if (chan_id == dpdma_debugfs.chan_id)
+ dpdma_debugfs.xilinx_dpdma_intr_done_count++;
+}
+
+static s64 xilinx_dpdma_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
+static ssize_t
+xilinx_dpdma_debugfs_desc_done_intr_write(char **dpdma_test_arg)
+{
+ char *arg;
+ char *arg_chan_id;
+ s64 id;
+
+ arg = strsep(dpdma_test_arg, " ");
+ if (!arg || strncasecmp(arg, "start", 5) != 0)
+ return -EINVAL;
+
+ arg_chan_id = strsep(dpdma_test_arg, " ");
+ id = xilinx_dpdma_debugfs_argument_value(arg_chan_id);
+
+ if (id < 0 || !IN_RANGE(id, VIDEO0, AUDIO1))
+ return -EINVAL;
+
+ dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+ dpdma_debugfs.xilinx_dpdma_intr_done_count = 0;
+ dpdma_debugfs.chan_id = id;
+
+ return 0;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_intr_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE - 1,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len + 1, "%d",
+ dpdma_debugfs.xilinx_dpdma_intr_done_count);
+
+ return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+ {"DESCRIPTOR_DONE_INTR", DPDMA_TC_INTR_DONE,
+ xilinx_dpdma_debugfs_desc_done_intr_read,
+ xilinx_dpdma_debugfs_desc_done_intr_write},
+};
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user
+ *buf, size_t size, loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *dpdma_test_req;
+ int ret;
+ int i;
+
+ if (*pos != 0 || size == 0)
+ return -EINVAL;
+
+ /* Only a single test instance is supported for now */
+ if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+ return -EBUSY;
+
+ kern_buff = kzalloc(size + 1, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+ /* Read the testcase name from a user request */
+ dpdma_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+ if (!strcasecmp(dpdma_test_req, dpdma_debugfs_reqs[i].req)) {
+ if (!dpdma_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ break;
+ }
+ }
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
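+
+/*
+ * Example usage from userspace (an illustrative sketch; assumes debugfs is
+ * mounted at /sys/kernel/debug and that channel 0 is streaming):
+ *
+ * echo "DESCRIPTOR_DONE_INTR start 0" > /sys/kernel/debug/dpdma/testcase
+ * cat /sys/kernel/debug/dpdma/testcase
+ *
+ * The read handler below then returns the number of done interrupts counted
+ * on the channel since the test was started.
+ */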
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ enum xilinx_dpdma_testcases tc;
+ int ret;
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+ return -ENOMEM;
+ }
+
+ tc = dpdma_debugfs.testcase;
+ if (tc == DPDMA_TC_NONE) {
+ out_str_len = strlen("No testcase executed");
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE - 1,
+ out_str_len);
+ snprintf(kern_buff, out_str_len + 1, "%s", "No testcase executed");
+ } else {
+ ret = dpdma_debugfs_reqs[tc].read_handler(&kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+ ret = copy_to_user(buf, kern_buff, size);
+
+ kfree(kern_buff);
+ if (ret)
+ return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dpdma_debugfs_read,
+ .write = xilinx_dpdma_debugfs_write,
+};
+
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ int err;
+ struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
+ if (!xilinx_dpdma_debugfs_dir) {
+ dev_err(dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dpdma_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dpdma_debugfs_dir, NULL,
+ &fops_xilinx_dpdma_dbgfs);
+ if (!xilinx_dpdma_debugfs_file) {
+ dev_err(dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
+ xilinx_dpdma_debugfs_dir = NULL;
+ return err;
+}
+
+#else
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ return 0;
+}
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+}
+#endif /* CONFIG_XILINX_DPDMA_DEBUG_FS */
+
+#define to_dpdma_tx_desc(tx) \
+ container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_dpdma_chan, common)
+
+/* IO operations */
+
+static inline u32 dpdma_read(void __iomem *base, u32 offset)
+{
+ return ioread32(base + offset);
+}
+
+static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+}
+
+static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
+}
+
+static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
+{
+ dpdma_write(base, offset, dpdma_read(base, offset) | set);
+}
+
+/* Xilinx DPDMA descriptor operations */
+
+/**
+ * xilinx_dpdma_sw_desc_next_32 - Set 32 bit address of a next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 32 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *next)
+{
+ sw_desc->hw.next_desc = next->phys;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 32 bit address.
+ */
+static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[],
+ unsigned int num_src_addr)
+{
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+ unsigned int i;
+
+ hw_desc->src_addr = dma_addr[0];
+
+ if (prev)
+ xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
+
+ for (i = 1; i < num_src_addr; i++) {
+ u32 *addr = &hw_desc->src_addr2;
+ u32 frag_addr;
+
+ frag_addr = dma_addr[i];
+ addr[i - 1] = frag_addr;
+ }
+}
+
+/**
+ * xilinx_dpdma_sw_desc_next_64 - Set 64 bit address of a next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 64 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *next)
+{
+ sw_desc->hw.next_desc = lower_32_bits(next->phys);
+ sw_desc->hw.addr_ext |= upper_32_bits(next->phys) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 64 bit address.
+ */
+static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[],
+ unsigned int num_src_addr)
+{
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+ unsigned int i;
+ u32 src_addr_extn;
+
+ hw_desc->src_addr = lower_32_bits(dma_addr[0]);
+ src_addr_extn = upper_32_bits(dma_addr[0]) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+ hw_desc->addr_ext |= (src_addr_extn <<
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT);
+
+ if (prev)
+ xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
+
+ for (i = 1; i < num_src_addr; i++) {
+ u32 *addr = &hw_desc->src_addr2;
+ u32 *addr_ext = &hw_desc->addr_ext_23;
+ u64 frag_addr;
+
+ frag_addr = dma_addr[i];
+ addr[i - 1] = lower_32_bits(frag_addr);
+
+ frag_addr >>= 32;
+ frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+ frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT * ((i - 1) % 2);
+ addr_ext[(i - 1) / 2] |= frag_addr;
+ }
+}
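+
+/*
+ * Worked example (illustrative): for dma_addr[0] = 0x0000012345678000ULL,
+ * src_addr is set to 0x45678000 and the extension 0x123 is stored in the
+ * upper half of addr_ext, so the payload address can be reconstructed as
+ * ((u64)(addr_ext >> 16) << 32) | src_addr.
+ */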
+
+/* Xilinx DPDMA channel descriptor operations */
+
+/**
+ * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a software descriptor from the channel's descriptor pool.
+ *
+ * Return: a software descriptor or NULL.
+ */
+static struct xilinx_dpdma_sw_desc *
+xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ dma_addr_t phys;
+
+ sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->phys = phys;
+
+ return sw_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
+ * @chan: DPDMA channel
+ * @sw_desc: software descriptor to free
+ *
+ * Free a software descriptor from the channel's descriptor pool.
+ */
+static void
+xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_sw_desc *sw_desc)
+{
+ dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
+}
+
+/**
+ * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor to dump
+ *
+ * Dump contents of a tx descriptor
+ */
+static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct device *dev = chan->xdev->dev;
+ unsigned int i = 0;
+
+ dev_dbg(dev, "------- TX descriptor dump start -------\n");
+ dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
+
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+
+ dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
+ dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
+ dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
+ dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
+ dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
+ dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
+ dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
+ dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
+ dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
+ dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
+ dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
+ dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
+ dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
+ dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
+ dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
+ dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
+ dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
+ dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
+ }
+
+ dev_dbg(dev, "------- TX descriptor dump end -------\n");
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a tx descriptor.
+ *
+ * Return: a tx descriptor or NULL.
+ */
+static struct xilinx_dpdma_tx_desc *
+xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+
+ tx_desc = kzalloc(sizeof(*tx_desc), GFP_ATOMIC);
+ if (!tx_desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&tx_desc->descriptors);
+ tx_desc->status = PREPARED;
+
+ return tx_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Free the tx descriptor @tx_desc including its software descriptors.
+ */
+static void
+xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc, *next;
+
+ if (!tx_desc)
+ return;
+
+ list_for_each_entry_safe(sw_desc, next, &tx_desc->descriptors, node) {
+ list_del(&sw_desc->node);
+ xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
+ }
+
+ kfree(tx_desc);
+}
+
+/**
+ * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Submit the tx descriptor @tx_desc to the channel @chan.
+ *
+ * Return: a cookie assigned to the tx descriptor
+ */
+static dma_cookie_t
+xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->submitted_desc) {
+ cookie = chan->submitted_desc->async_tx.cookie;
+ goto out_unlock;
+ }
+
+ cookie = dma_cookie_assign(&tx_desc->async_tx);
+
+ /*
+ * Assign the cookie to descriptors in this transaction. Only the
+ * lower 16 bits of the cookie are used, but that should be enough.
+ */
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
+ sw_desc->hw.desc_id = cookie;
+
+ if (tx_desc != chan->allocated_desc)
+ dev_err(chan->xdev->dev, "desc != allocated_desc\n");
+ else
+ chan->allocated_desc = NULL;
+ chan->submitted_desc = tx_desc;
+
+ if (chan->id == VIDEO1 || chan->id == VIDEO2) {
+ chan->video_group = true;
+ chan->xdev->chan[VIDEO0]->video_group = true;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
+ * @chan: DPDMA channel
+ * @list: tx descriptor list
+ *
+ * Free tx descriptors in the list @list.
+ */
+static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc, *next;
+
+ list_for_each_entry_safe(tx_desc, next, list, node) {
+ list_del(&tx_desc->node);
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+ }
+}
+
+/**
+ * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors associated with the channel. The channel should be
+ * disabled before this function is called, otherwise, this function may
+ * result in misbehavior of the system due to remaining outstanding
+ * transactions.
+ */
+static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_dbg(chan->xdev->dev, "chan->status = %s\n",
+ chan->status == STREAMING ? "STREAMING" : "IDLE");
+
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
+ chan->allocated_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
+ chan->submitted_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
+ chan->pending_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+ chan->active_desc = NULL;
+ xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
+ * @chan: DPDMA channel
+ *
+ * Trigger the complete callbacks of descriptors with finished transactions.
+ * Free descriptors which are no longer in use.
+ */
+static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *desc;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ unsigned long flags;
+ unsigned int cnt, i;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ while (!list_empty(&chan->done_list)) {
+ desc = list_first_entry(&chan->done_list,
+ struct xilinx_dpdma_tx_desc, node);
+ list_del(&desc->node);
+
+ cnt = desc->done_cnt;
+ desc->done_cnt = 0;
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ for (i = 0; i < cnt; i++)
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ xilinx_dpdma_chan_free_tx_desc(chan, desc);
+ }
+
+ if (chan->active_desc) {
+ cnt = chan->active_desc->done_cnt;
+ chan->active_desc->done_cnt = 0;
+ callback = chan->active_desc->async_tx.callback;
+ callback_param = chan->active_desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ for (i = 0; i < cnt; i++)
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_active - Set the descriptor as active
+ * @chan: DPDMA channel
+ *
+ * Make the pending descriptor @chan->pending_desc the active one. This
+ * function should be called when the channel starts operating on the pending
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->pending_desc)
+ goto out_unlock;
+
+ if (chan->active_desc)
+ list_add_tail(&chan->active_desc->node, &chan->done_list);
+
+ chan->active_desc = chan->pending_desc;
+ chan->pending_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
+ * @chan: DPDMA channel
+ *
+ * Mark the current active descriptor @chan->active_desc as 'done'. This
+ * function should be called to mark completion of the currently active
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dpdma_debugfs_intr_done_count_incr(chan->id);
+
+ if (!chan->active_desc) {
+ dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
+ goto out_unlock;
+ }
+
+ chan->active_desc->done_cnt++;
+ if (chan->active_desc->status == PREPARED) {
+ dma_cookie_complete(&chan->active_desc->async_tx);
+ chan->active_desc->status = ACTIVE;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ tasklet_schedule(&chan->done_task);
+}
+
+/**
+ * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
+ * @chan: DPDMA channel
+ * @sgl: scatter-gather list
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given scatter-gather transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
+ struct scatterlist *sgl)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ struct scatterlist *iter = sgl;
+ u32 line_size = 0;
+
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ while (!sg_is_chain(iter))
+ line_size += sg_dma_len(iter++);
+
+ while (sgl) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ dma_addr_t dma_addr[4];
+ unsigned int num_pages = 0;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
+ dma_addr[num_pages] = sg_dma_address(sgl++);
+ if (!IS_ALIGNED(dma_addr[num_pages++],
+ XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+ }
+
+ chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = line_size;
+ hw_desc->hsize_stride =
+ line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+ last = sw_desc;
+ if (sg_is_last(sgl))
+ break;
+ sgl = sg_chain_ptr(sgl);
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (chan->xdev->ext_addr)
+ xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+ else
+ xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of a single period in bytes
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+
+ if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = period_len;
+ hw_desc->hsize_stride =
+ period_len <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->hsize_stride |=
+ period_len <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ buf_addr += period_len;
+ last = sw_desc;
+ }
+
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (chan->xdev->ext_addr)
+ xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+ else
+ xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
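+
+/*
+ * Illustrative client call (a sketch, not part of this patch) for a cyclic
+ * audio buffer; "chan" and "buf" (a DMA-mapped, 256-byte aligned buffer)
+ * are hypothetical, and buf_len must be a multiple of period_len:
+ *
+ * tx = dmaengine_prep_dma_cyclic(chan, buf, 8 * 4096, 4096,
+ * DMA_MEM_TO_DEV, 0);
+ */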
+
+/**
+ * xilinx_dpdma_chan_prep_interleaved - Prepare an interleaved dma descriptor
+ * @chan: DPDMA channel
+ * @xt: dma interleaved template
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * based on @xt.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
+ struct dma_interleaved_template *xt)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ size_t hsize = xt->sgl[0].size;
+ size_t stride = hsize + xt->sgl[0].icg;
+
+ if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ return NULL;
+ }
+
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
+ hw_desc = &sw_desc->hw;
+ hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
+ hw_desc->xfer_size = hsize * xt->numf;
+ hw_desc->hsize_stride = hsize <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->hsize_stride |= (stride / 16) <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
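+
+/*
+ * Worked example (illustrative): for a 1920x1080 XRGB8888 frame,
+ * xt->sgl[0].size = 1920 * 4 = 7680 bytes and xt->numf = 1080. With
+ * icg = 0, hsize stays 7680 (already a multiple of 128 bits), xfer_size
+ * becomes 7680 * 1080 bytes, and the stride field is 7680 / 16 = 480,
+ * expressed in 16-byte units.
+ */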
+
+/* Xilinx DPDMA channel operations */
+
+/**
+ * xilinx_dpdma_chan_enable - Enable the channel
+ * @chan: DPDMA channel
+ *
+ * Enable the channel and its interrupts. Set the QoS values for video class.
+ */
+static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ reg |= XILINX_DPDMA_INTR_GLOBAL_ERR;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ reg = XILINX_DPDMA_CH_CNTL_ENABLE;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
+}
+
+/**
+ * xilinx_dpdma_chan_disable - Disable the channel
+ * @chan: DPDMA channel
+ *
+ * Disable the channel and its interrupts.
+ */
+static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+}
+
+/**
+ * xilinx_dpdma_chan_pause - Pause the channel
+ * @chan: DPDMA channel
+ *
+ * Pause the channel.
+ */
+static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
+{
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+/**
+ * xilinx_dpdma_chan_unpause - Unpause the channel
+ * @chan: DPDMA channel
+ *
+ * Unpause the channel.
+ */
+static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
+{
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+static u32
+xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ u32 i = 0, ret = 0;
+
+ for (i = VIDEO0; i < GRAPHICS; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->status != STREAMING)
+ return 0;
+
+ if (xdev->chan[i]->video_group)
+ ret |= BIT(i);
+ }
+
+ return ret;
+}
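+
+/*
+ * Example (illustrative): when VIDEO0 and VIDEO1 form a group and both are
+ * STREAMING, this returns BIT(VIDEO0) | BIT(VIDEO1) = 0x3, which the caller
+ * shifts into the trigger/retrigger field of XILINX_DPDMA_GBL so the whole
+ * group starts atomically.
+ */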
+
+/**
+ * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
+ * @chan: DPDMA channel
+ *
+ * Move @chan->submitted_desc to the pending state, program the channel with
+ * it, and trigger the channel. If the channel is already streaming, the
+ * channel is re-triggered with the pending descriptor.
+ */
+static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ unsigned long flags;
+ u32 reg, channels;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->submitted_desc || chan->pending_desc)
+ goto out_unlock;
+
+ chan->pending_desc = chan->submitted_desc;
+ chan->submitted_desc = NULL;
+
+ sw_desc = list_first_entry(&chan->pending_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
+ (u32)sw_desc->phys);
+ if (xdev->ext_addr)
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
+ ((u64)sw_desc->phys >> 32) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
+
+ if (chan->first_frame) {
+ chan->first_frame = false;
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ if (!channels)
+ goto out_unlock;
+ reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
+ } else {
+ reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
+ }
+ } else {
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ if (!channels)
+ goto out_unlock;
+ reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
+ } else {
+ reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
+ }
+ }
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_start - Start the channel
+ * @chan: DPDMA channel
+ *
+ * Start the channel by enabling interrupts and triggering the channel.
+ * If the channel is already enabled or there's no submitted descriptor, this
+ * function does nothing.
+ */
+static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->submitted_desc || chan->status == STREAMING)
+ goto out_unlock;
+
+ xilinx_dpdma_chan_unpause(chan);
+ xilinx_dpdma_chan_enable(chan);
+ chan->first_frame = true;
+ chan->status = STREAMING;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_ostand - Number of outstanding transactions
+ * @chan: DPDMA channel
+ *
+ * Read and return the number of outstanding transactions from register.
+ *
+ * Return: Number of outstanding transactions from the status register.
+ */
+static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
+{
+ return (dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS) &
+ XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK) >>
+ XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT;
+}
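+
+/*
+ * Worked example (illustrative): with XILINX_DPDMA_CH_STATUS reading
+ * 0x00600000, bits 24:21 hold 0x3, i.e. three outstanding transactions.
+ */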
+
+/**
+ * xilinx_dpdma_chan_no_ostand - Notify no outstanding transaction event
+ * @chan: DPDMA channel
+ *
+ * Notify waiters of the 'no outstanding transactions' event so they can stop
+ * the channel safely. This function is supposed to be called when the 'no
+ * outstanding' interrupt is generated. The interrupt is disabled here and
+ * should be re-enabled when the event is handled. If the channel status
+ * register still shows some number of outstanding transactions, the interrupt
+ * remains enabled.
+ *
+ * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
+ * transaction(s).
+ */
+static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt;
+
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ if (cnt) {
+ dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
+ return -EWOULDBLOCK;
+ }
+
+ /* Disable 'no outstanding' interrupt */
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
+ 1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ wake_up(&chan->wait_to_stop);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding intr
+ * @chan: DPDMA channel
+ *
+ * Wait for the no outstanding transaction interrupt. This function can sleep
+ * for up to 50 ms.
+ *
+ * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
+ * from wait_event_interruptible_timeout().
+ */
+static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ int ret;
+
+ /* Wait up to 50 ms for the no outstanding transaction interrupt */
+ ret = wait_event_interruptible_timeout(chan->wait_to_stop,
+ !xilinx_dpdma_chan_ostand(chan),
+ msecs_to_jiffies(50));
+ if (ret > 0) {
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ 1 <<
+ (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+ xilinx_dpdma_chan_ostand(chan));
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
+ * @chan: DPDMA channel
+ *
+ * Poll the outstanding transaction status, and return when there's no
+ * outstanding transaction. This function can be used in interrupt context or
+ * where atomicity is required. The calling thread may busy-wait for up to
+ * 50 ms.
+ *
+ * Return: 0 on success, or -ETIMEDOUT.
+ */
+static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt, loop = 50000;
+
+ /* Poll for up to 50 ms, i.e. one frame period at 20 fps. */
+ do {
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ udelay(1);
+ } while (loop-- > 0 && cnt);
+
+ if (!cnt) {
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ 1 <<
+ (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+ xilinx_dpdma_chan_ostand(chan));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xilinx_dpdma_chan_stop - Stop the channel
+ * @chan: DPDMA channel
+ * @poll: flag whether to poll or wait
+ *
+ * Stop the channel with the following sequence: 1. pause, 2. wait (sleep or
+ * poll, depending on @poll) for the no outstanding transaction interrupt,
+ * 3. disable the channel.
+ *
+ * Return: 0 on success, or an error code from
+ * xilinx_dpdma_chan_poll_no_ostand() or xilinx_dpdma_chan_wait_no_ostand().
+ */
+static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan, bool poll)
+{
+ unsigned long flags;
+ int ret;
+
+ xilinx_dpdma_chan_pause(chan);
+ if (poll)
+ ret = xilinx_dpdma_chan_poll_no_ostand(chan);
+ else
+ ret = xilinx_dpdma_chan_wait_no_ostand(chan);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ xilinx_dpdma_chan_disable(chan);
+ chan->status = IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Allocate a descriptor pool for the channel.
+ *
+ * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
+ */
+static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
+{
+ chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
+ chan->xdev->dev,
+ sizeof(struct xilinx_dpdma_sw_desc),
+ __alignof__(struct xilinx_dpdma_sw_desc), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->xdev->dev,
+ "failed to allocate a descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_free_resources - Free all resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors and the descriptor pool for the channel.
+ */
+static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
+{
+ xilinx_dpdma_chan_free_all_desc(chan);
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. Poll for the no
+ * outstanding transaction state, as this can be called from an atomic
+ * context.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ int ret;
+ unsigned int i;
+
+ if (chan->video_group) {
+ for (i = VIDEO0; i < GRAPHICS; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->status == STREAMING) {
+ xilinx_dpdma_chan_pause(xdev->chan[i]);
+ xdev->chan[i]->video_group = false;
+ }
+ }
+ }
+
+ ret = xilinx_dpdma_chan_stop(chan, true);
+ if (ret)
+ return ret;
+
+ xilinx_dpdma_chan_free_all_desc(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_synchronize - Synchronize all outgoing transfers
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. As this can't be
+ * called in an atomic context, sleep-wait for no outstanding transaction
+ * interrupt. Then kill all related tasklets.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_synchronize(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ int ret;
+ unsigned int i;
+
+ if (chan->video_group) {
+ for (i = VIDEO0; i < GRAPHICS; i++) {
+ if (xdev->chan[i]->video_group &&
+ xdev->chan[i]->status == STREAMING) {
+ xilinx_dpdma_chan_pause(xdev->chan[i]);
+ xdev->chan[i]->video_group = false;
+ }
+ }
+ }
+
+ ret = xilinx_dpdma_chan_stop(chan, false);
+ if (ret)
+ return ret;
+
+ tasklet_kill(&chan->err_task);
+ tasklet_kill(&chan->done_task);
+ xilinx_dpdma_chan_free_all_desc(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_err - Detect any channel error
+ * @chan: DPDMA channel
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any channel error occurs, or false otherwise.
+ */
+static bool
+xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
+{
+ if (!chan)
+ return false;
+
+ if (chan->status == STREAMING &&
+ ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
+ (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
+ return true;
+
+ return false;
+}
+
+/**
+ * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
+ * @chan: DPDMA channel
+ *
+ * This function is called when any channel error or any global error occurs.
+ * It disables the channel paused by the error and determines whether the
+ * currently active descriptor can be rescheduled, depending on the
+ * descriptor status.
+ */
+static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct device *dev = xdev->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
+ dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
+ dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
+
+ xilinx_dpdma_chan_disable(chan);
+ chan->status = IDLE;
+
+ if (!chan->active_desc)
+ goto out_unlock;
+
+ xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
+
+ switch (chan->active_desc->status) {
+ case ERRORED:
+ dev_dbg(dev, "repeated error on desc\n");
+ case ACTIVE:
+ case PREPARED:
+ /* Reschedule if there's no new descriptor */
+ if (!chan->pending_desc && !chan->submitted_desc) {
+ chan->active_desc->status = ERRORED;
+ chan->submitted_desc = chan->active_desc;
+ } else {
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+ }
+ break;
+ }
+ chan->active_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/* DMA tx descriptor */
+
+static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(tx->chan);
+ struct xilinx_dpdma_tx_desc *tx_desc = to_dpdma_tx_desc(tx);
+
+ return xilinx_dpdma_chan_submit_tx_desc(chan, tx_desc);
+}
+
+/* DMA channel operations */
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct dma_async_tx_descriptor *async_tx;
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (!sgl || sg_len < 2)
+ return NULL;
+
+ async_tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
+ if (!async_tx)
+ return NULL;
+
+ dma_async_tx_descriptor_init(async_tx, dchan);
+ async_tx->tx_submit = xilinx_dpdma_tx_submit;
+ async_tx->flags = flags;
+ async_tx_ack(async_tx);
+
+ return async_tx;
+}
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct dma_async_tx_descriptor *async_tx;
+
+ if (direction != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+ period_len);
+ if (!async_tx)
+ return NULL;
+
+ dma_async_tx_descriptor_init(async_tx, dchan);
+ async_tx->tx_submit = xilinx_dpdma_tx_submit;
+ async_tx->flags = flags;
+ async_tx_ack(async_tx);
+
+ return async_tx;
+}
+
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct dma_async_tx_descriptor *async_tx;
+
+ if (xt->dir != DMA_MEM_TO_DEV)
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ async_tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
+ if (!async_tx)
+ return NULL;
+
+ dma_async_tx_descriptor_init(async_tx, dchan);
+ async_tx->tx_submit = xilinx_dpdma_tx_submit;
+ async_tx->flags = flags;
+ async_tx_ack(async_tx);
+
+ return async_tx;
+}
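+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a client such as
+ * a display driver could describe one frame for this channel with an
+ * interleaved template. The buffer address and geometry below are
+ * hypothetical; the dmaengine calls are the standard consumer API.
+ *
+ *   struct dma_interleaved_template *xt;
+ *   struct dma_async_tx_descriptor *tx;
+ *
+ *   xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
+ *   xt->dir = DMA_MEM_TO_DEV;
+ *   xt->src_start = fb_dma_addr;          // frame base address
+ *   xt->numf = height;                    // lines per frame
+ *   xt->frame_size = 1;                   // one chunk per line
+ *   xt->sgl[0].size = width * 4;          // payload bytes per line
+ *   xt->sgl[0].icg = stride - width * 4;  // inter-line gap
+ *   tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
+ */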
+
+static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ dma_cookie_init(dchan);
+
+ return xilinx_dpdma_chan_alloc_resources(chan);
+}
+
+static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_dpdma_chan_free_resources(chan);
+}
+
+static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_dpdma_chan_start(chan);
+ xilinx_dpdma_chan_issue_pending(chan);
+}
+
+static int xilinx_dpdma_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ if (config->direction != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int xilinx_dpdma_pause(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
+
+ return 0;
+}
+
+static int xilinx_dpdma_resume(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
+
+ return 0;
+}
+
+static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
+{
+ return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
+}
+
+static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
+{
+ xilinx_dpdma_chan_synchronize(to_xilinx_chan(dchan));
+}
+
+/* Xilinx DPDMA device operations */
+
+/**
+ * xilinx_dpdma_err - Detect any global error
+ * @isr: Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: true if any global error occurs, or false otherwise.
+ */
+static bool xilinx_dpdma_err(u32 isr, u32 eisr)
+{
+ if ((isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
+ eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR))
+ return true;
+
+ return false;
+}
+
+/**
+ * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
+ * @xdev: DPDMA device
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Handle if any error occurs based on @isr and @eisr. This function disables
+ * corresponding error interrupts, and those should be re-enabled once handling
+ * is done.
+ */
+static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
+ u32 isr, u32 eisr)
+{
+ bool err = xilinx_dpdma_err(isr, eisr);
+ unsigned int i;
+
+ dev_dbg_ratelimited(xdev->dev,
+ "error intr: isr = 0x%08x, eisr = 0x%08x\n",
+ isr, eisr);
+
+ /* Disable channel error interrupts until errors are handled. */
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
+ isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
+ eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
+
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+ if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
+ tasklet_schedule(&xdev->chan[i]->err_task);
+}
+
+/**
+ * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
+ * @xdev: DPDMA device
+ *
+ * Handle the VSYNC event. At this point, the current frame becomes active,
+ * which means the DPDMA actually starts fetching, and the next frame can be
+ * scheduled.
+ */
+static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
+ if (xdev->chan[i] &&
+ xdev->chan[i]->status == STREAMING) {
+ xilinx_dpdma_chan_desc_active(xdev->chan[i]);
+ xilinx_dpdma_chan_issue_pending(xdev->chan[i]);
+ }
+ }
+}
+
+/**
+ * xilinx_dpdma_enable_intr - Enable interrupts
+ * @xdev: DPDMA device
+ *
+ * Enable interrupts.
+ */
+static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
+{
+ dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
+}
+
+/**
+ * xilinx_dpdma_disable_intr - Disable interrupts
+ * @xdev: DPDMA device
+ *
+ * Disable interrupts.
+ */
+static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
+{
+ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+}
+
+/* Interrupt handling operations */
+
+/**
+ * xilinx_dpdma_chan_err_task - Per-channel tasklet for error handling
+ * @data: tasklet data to be cast to the DPDMA channel structure
+ *
+ * Per-channel error handling tasklet. This function waits for the outstanding
+ * transaction to complete, then triggers error handling. After error handling,
+ * it re-enables the channel error interrupts and restarts the channel if
+ * needed.
+ */
+static void xilinx_dpdma_chan_err_task(unsigned long data)
+{
+ struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+
+ /* Proceed error handling even when polling fails. */
+ xilinx_dpdma_chan_poll_no_ostand(chan);
+
+ xilinx_dpdma_chan_handle_err(chan);
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
+ XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
+ XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+ xilinx_dpdma_chan_start(chan);
+ xilinx_dpdma_chan_issue_pending(chan);
+}
+
+/**
+ * xilinx_dpdma_chan_done_task - Per-channel tasklet for done interrupt handling
+ * @data: tasklet data to be cast to the DPDMA channel structure
+ *
+ * Per-channel done-interrupt handling tasklet.
+ */
+static void xilinx_dpdma_chan_done_task(unsigned long data)
+{
+ struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+
+ xilinx_dpdma_chan_cleanup_desc(chan);
+}
+
+static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dpdma_device *xdev = data;
+ u32 status, error, i;
+ unsigned long masked;
+
+ status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
+ error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
+ if (!status && !error)
+ return IRQ_NONE;
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
+
+ if (status & XILINX_DPDMA_INTR_VSYNC)
+ xilinx_dpdma_handle_vsync_intr(xdev);
+
+ masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
+ XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
+ if (masked)
+ for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+ xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
+
+ masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
+ XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
+ if (masked)
+ for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+ xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
+
+ masked = status & XILINX_DPDMA_INTR_ERR_ALL;
+ if (masked || error)
+ xilinx_dpdma_handle_err_intr(xdev, masked, error);
+
+ return IRQ_HANDLED;
+}
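+
+/*
+ * Worked example (hypothetical status value): the per-channel DESC_DONE bits
+ * occupy one bit per channel starting at XILINX_DPDMA_INTR_DESC_DONE_SHIFT,
+ * so a shifted mask of 0x5 demultiplexes to channels 0 and 2, and
+ * for_each_set_bit() visits exactly those channels:
+ *
+ *   masked = 0x5;
+ *   for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+ *       xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
+ */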
+
+/* Initialization operations */
+
+static struct xilinx_dpdma_chan *
+xilinx_dpdma_chan_probe(struct device_node *node,
+ struct xilinx_dpdma_device *xdev)
+{
+ struct xilinx_dpdma_chan *chan;
+
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_device_is_compatible(node, "xlnx,video0")) {
+ chan->id = VIDEO0;
+ } else if (of_device_is_compatible(node, "xlnx,video1")) {
+ chan->id = VIDEO1;
+ } else if (of_device_is_compatible(node, "xlnx,video2")) {
+ chan->id = VIDEO2;
+ } else if (of_device_is_compatible(node, "xlnx,graphics")) {
+ chan->id = GRAPHICS;
+ } else if (of_device_is_compatible(node, "xlnx,audio0")) {
+ chan->id = AUDIO0;
+ } else if (of_device_is_compatible(node, "xlnx,audio1")) {
+ chan->id = AUDIO1;
+ } else {
+ dev_err(xdev->dev, "invalid channel compatible string in DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
+ chan->id;
+ chan->status = IDLE;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->done_list);
+ init_waitqueue_head(&chan->wait_to_stop);
+
+ tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
+ (unsigned long)chan);
+ tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
+ (unsigned long)chan);
+
+ chan->common.device = &xdev->common;
+ chan->xdev = xdev;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+ xdev->chan[chan->id] = chan;
+
+ return chan;
+}
+
+static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
+{
+ tasklet_kill(&chan->err_task);
+ tasklet_kill(&chan->done_task);
+ list_del(&chan->common.device_node);
+}
+
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
+ uint32_t chan_id = dma_spec->args[0];
+
+ if (chan_id >= XILINX_DPDMA_NUM_CHAN)
+ return NULL;
+
+ if (!xdev->chan[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
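+
+/*
+ * Device tree sketch (node names and addresses are hypothetical): the single
+ * dma-spec cell carries the channel index checked above, so a client
+ * requesting the graphics channel (id 3, assuming the channel enum starts at
+ * zero) would look like:
+ *
+ *   dpdma: dma-controller@fd4c0000 {
+ *       compatible = "xlnx,dpdma";
+ *       #dma-cells = <1>;
+ *   };
+ *
+ *   client {
+ *       dmas = <&dpdma 3>;
+ *       dma-names = "gfx";
+ *   };
+ */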
+
+static int xilinx_dpdma_probe(struct platform_device *pdev)
+{
+ struct xilinx_dpdma_device *xdev;
+ struct xilinx_dpdma_chan *chan;
+ struct dma_device *ddev;
+ struct resource *res;
+ struct device_node *node, *child;
+ u32 i;
+ int irq, ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ ddev = &xdev->common;
+ ddev->dev = &pdev->dev;
+ node = xdev->dev->of_node;
+
+ xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
+ if (IS_ERR(xdev->axi_clk))
+ return PTR_ERR(xdev->axi_clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->reg))
+ return PTR_ERR(xdev->reg);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(xdev->dev, "failed to get platform irq\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
+ IRQF_SHARED, dev_name(xdev->dev), xdev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
+ ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
+
+ ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
+ ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+ ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
+ ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
+ ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
+ ddev->device_tx_status = xilinx_dpdma_tx_status;
+ ddev->device_issue_pending = xilinx_dpdma_issue_pending;
+ ddev->device_config = xilinx_dpdma_config;
+ ddev->device_pause = xilinx_dpdma_pause;
+ ddev->device_resume = xilinx_dpdma_resume;
+ ddev->device_terminate_all = xilinx_dpdma_terminate_all;
+ ddev->device_synchronize = xilinx_dpdma_synchronize;
+ ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
+ ddev->directions = BIT(DMA_MEM_TO_DEV);
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ for_each_child_of_node(node, child) {
+ chan = xilinx_dpdma_chan_probe(child, xdev);
+ if (IS_ERR(chan)) {
+ dev_err(xdev->dev, "failed to probe a channel\n");
+ ret = PTR_ERR(chan);
+ goto error;
+ }
+ }
+
+ xdev->ext_addr = sizeof(dma_addr_t) > 4;
+ if (xdev->ext_addr)
+ xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
+ else
+ xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
+
+ ret = clk_prepare_enable(xdev->axi_clk);
+ if (ret) {
+ dev_err(xdev->dev, "failed to enable the axi clock\n");
+ goto error;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error_dma_async;
+ }
+
+ ret = of_dma_controller_register(xdev->dev->of_node,
+ of_dma_xilinx_xlate, ddev);
+ if (ret) {
+ dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
+ goto error_of_dma;
+ }
+
+ xilinx_dpdma_enable_intr(xdev);
+
+ xilinx_dpdma_debugfs_init(&pdev->dev);
+
+ dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
+
+ return 0;
+
+error_of_dma:
+ dma_async_device_unregister(ddev);
+error_dma_async:
+ clk_disable_unprepare(xdev->axi_clk);
+error:
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+ if (xdev->chan[i])
+ xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+ return ret;
+}
+
+static int xilinx_dpdma_remove(struct platform_device *pdev)
+{
+ struct xilinx_dpdma_device *xdev;
+ unsigned int i;
+
+ xdev = platform_get_drvdata(pdev);
+
+ xilinx_dpdma_disable_intr(xdev);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&xdev->common);
+ clk_disable_unprepare(xdev->axi_clk);
+
+ for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+ if (xdev->chan[i])
+ xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dpdma_of_match[] = {
+ { .compatible = "xlnx,dpdma",},
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
+
+static struct platform_driver xilinx_dpdma_driver = {
+ .probe = xilinx_dpdma_probe,
+ .remove = xilinx_dpdma_remove,
+ .driver = {
+ .name = "xilinx-dpdma",
+ .of_match_table = xilinx_dpdma_of_match,
+ },
+};
+
+module_platform_driver(xilinx_dpdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DPDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_frmbuf.c b/drivers/dma/xilinx/xilinx_frmbuf.c
new file mode 100644
index 000000000000..a135af17129b
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_frmbuf.c
@@ -0,0 +1,1709 @@
+/*
+ * DMAEngine driver for Xilinx Framebuffer IP
+ *
+ * Copyright (C) 2016,2017 Xilinx, Inc. All rights reserved.
+ *
+ * Authors: Radhey Shyam Pandey <radheys@xilinx.com>
+ * John Nichols <jnichol@xilinx.com>
+ * Jeffrey Mouroux <jmouroux@xilinx.com>
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Framebuffer core is a soft Xilinx IP core that
+ * provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/dmapool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_FRMBUF_CTRL_OFFSET 0x00
+#define XILINX_FRMBUF_GIE_OFFSET 0x04
+#define XILINX_FRMBUF_IE_OFFSET 0x08
+#define XILINX_FRMBUF_ISR_OFFSET 0x0c
+#define XILINX_FRMBUF_WIDTH_OFFSET 0x10
+#define XILINX_FRMBUF_HEIGHT_OFFSET 0x18
+#define XILINX_FRMBUF_STRIDE_OFFSET 0x20
+#define XILINX_FRMBUF_FMT_OFFSET 0x28
+#define XILINX_FRMBUF_ADDR_OFFSET 0x30
+#define XILINX_FRMBUF_ADDR2_OFFSET 0x3c
+#define XILINX_FRMBUF_FID_OFFSET 0x48
+
+/* Control Registers */
+#define XILINX_FRMBUF_CTRL_AP_START BIT(0)
+#define XILINX_FRMBUF_CTRL_AP_DONE BIT(1)
+#define XILINX_FRMBUF_CTRL_AP_IDLE BIT(2)
+#define XILINX_FRMBUF_CTRL_AP_READY BIT(3)
+#define XILINX_FRMBUF_CTRL_FLUSH BIT(5)
+#define XILINX_FRMBUF_CTRL_FLUSH_DONE BIT(6)
+#define XILINX_FRMBUF_CTRL_AUTO_RESTART BIT(7)
+#define XILINX_FRMBUF_GIE_EN BIT(0)
+
+/* Interrupt Status and Control */
+#define XILINX_FRMBUF_IE_AP_DONE BIT(0)
+#define XILINX_FRMBUF_IE_AP_READY BIT(1)
+
+#define XILINX_FRMBUF_ISR_AP_DONE_IRQ BIT(0)
+#define XILINX_FRMBUF_ISR_AP_READY_IRQ BIT(1)
+
+#define XILINX_FRMBUF_ISR_ALL_IRQ_MASK \
+ (XILINX_FRMBUF_ISR_AP_DONE_IRQ | \
+ XILINX_FRMBUF_ISR_AP_READY_IRQ)
+
+/* Video Format Register Settings */
+#define XILINX_FRMBUF_FMT_RGBX8 10
+#define XILINX_FRMBUF_FMT_YUVX8 11
+#define XILINX_FRMBUF_FMT_YUYV8 12
+#define XILINX_FRMBUF_FMT_RGBA8 13
+#define XILINX_FRMBUF_FMT_YUVA8 14
+#define XILINX_FRMBUF_FMT_RGBX10 15
+#define XILINX_FRMBUF_FMT_YUVX10 16
+#define XILINX_FRMBUF_FMT_Y_UV8 18
+#define XILINX_FRMBUF_FMT_Y_UV8_420 19
+#define XILINX_FRMBUF_FMT_RGB8 20
+#define XILINX_FRMBUF_FMT_YUV8 21
+#define XILINX_FRMBUF_FMT_Y_UV10 22
+#define XILINX_FRMBUF_FMT_Y_UV10_420 23
+#define XILINX_FRMBUF_FMT_Y8 24
+#define XILINX_FRMBUF_FMT_Y10 25
+#define XILINX_FRMBUF_FMT_BGRA8 26
+#define XILINX_FRMBUF_FMT_BGRX8 27
+#define XILINX_FRMBUF_FMT_UYVY8 28
+#define XILINX_FRMBUF_FMT_BGR8 29
+#define XILINX_FRMBUF_FMT_RGBX12 30
+#define XILINX_FRMBUF_FMT_RGB16 35
+
+/* FID Register */
+#define XILINX_FRMBUF_FID_MASK BIT(0)
+
+#define XILINX_FRMBUF_ALIGN_MUL 8
+
+#define WAIT_FOR_FLUSH_DONE 25
+
+/* Pixels per clock property flag */
+#define XILINX_PPC_PROP BIT(0)
+#define XILINX_FLUSH_PROP BIT(1)
+#define XILINX_FID_PROP BIT(2)
+#define XILINX_CLK_PROP BIT(3)
+
+#define XILINX_FRMBUF_MAX_HEIGHT (4320)
+#define XILINX_FRMBUF_MIN_HEIGHT (64)
+#define XILINX_FRMBUF_MAX_WIDTH (8192)
+#define XILINX_FRMBUF_MIN_WIDTH (64)
+
+/**
+ * struct xilinx_frmbuf_desc_hw - Hardware Descriptor
+ * @luma_plane_addr: Luma or packed plane buffer address
+ * @chroma_plane_addr: Chroma plane buffer address
+ * @vsize: Vertical Size
+ * @hsize: Horizontal Size
+ * @stride: Number of bytes between the first
+ * pixels of each horizontal line
+ */
+struct xilinx_frmbuf_desc_hw {
+ dma_addr_t luma_plane_addr;
+ dma_addr_t chroma_plane_addr;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+};
+
+/**
+ * struct xilinx_frmbuf_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @hw: Hardware descriptor
+ * @node: Node in the channel descriptors list
+ * @fid: Field ID of buffer
+ * @earlycb: Early callback mode (EARLY_CALLBACK, EARLY_CALLBACK_LOW_LATENCY)
+ */
+struct xilinx_frmbuf_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct xilinx_frmbuf_desc_hw hw;
+ struct list_head node;
+ u32 fid;
+ u32 earlycb;
+};
+
+/**
+ * struct xilinx_frmbuf_chan - Driver specific dma channel structure
+ * @xdev: Driver specific device structure
+ * @lock: Descriptor operation lock
+ * @chan_node: Member of a list of framebuffer channel instances
+ * @pending_list: Descriptors waiting
+ * @done_list: Complete descriptors
+ * @staged_desc: Next buffer to be programmed
+ * @active_desc: Currently active buffer being read/written to
+ * @common: DMA common channel
+ * @dev: The dma device
+ * @write_addr: callback that will write dma addresses to IP (32 or 64 bit)
+ * @irq: Channel IRQ
+ * @direction: Transfer direction
+ * @idle: Channel idle state
+ * @tasklet: Cleanup work after irq
+ * @vid_fmt: Reference to currently assigned video format description
+ * @hw_fid: FID enabled in hardware flag
+ * @mode: Select operation mode
+ */
+struct xilinx_frmbuf_chan {
+ struct xilinx_frmbuf_device *xdev;
+ /* Descriptor operation lock */
+ spinlock_t lock;
+ struct list_head chan_node;
+ struct list_head pending_list;
+ struct list_head done_list;
+ struct xilinx_frmbuf_tx_descriptor *staged_desc;
+ struct xilinx_frmbuf_tx_descriptor *active_desc;
+ struct dma_chan common;
+ struct device *dev;
+ void (*write_addr)(struct xilinx_frmbuf_chan *chan, u32 reg,
+ dma_addr_t value);
+ int irq;
+ enum dma_transfer_direction direction;
+ bool idle;
+ struct tasklet_struct tasklet;
+ const struct xilinx_frmbuf_format_desc *vid_fmt;
+ bool hw_fid;
+ enum operation_mode mode;
+};
+
+/**
+ * struct xilinx_frmbuf_format_desc - lookup table to match fourcc to format
+ * @dts_name: Device tree name for this entry.
+ * @id: Format ID
+ * @bpw: Bits of pixel data + padding in a 32-bit word (luma plane for semi-pl)
+ * @ppw: Number of pixels represented in a 32-bit word (luma plane for semi-pl)
+ * @num_planes: Expected number of plane buffers in framebuffer for this format
+ * @drm_fmt: DRM video framework equivalent fourcc code
+ * @v4l2_fmt: Video 4 Linux framework equivalent fourcc code
+ * @fmt_bitmask: Flag identifying this format in device-specific "enabled"
+ * bitmap
+ */
+struct xilinx_frmbuf_format_desc {
+ const char *dts_name;
+ u32 id;
+ u32 bpw;
+ u32 ppw;
+ u32 num_planes;
+ u32 drm_fmt;
+ u32 v4l2_fmt;
+ u32 fmt_bitmask;
+};
+
+static LIST_HEAD(frmbuf_chan_list);
+static DEFINE_MUTEX(frmbuf_chan_list_lock);
+
+static const struct xilinx_frmbuf_format_desc xilinx_frmbuf_formats[] = {
+ {
+ .dts_name = "xbgr8888",
+ .id = XILINX_FRMBUF_FMT_RGBX8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .v4l2_fmt = V4L2_PIX_FMT_BGRX32,
+ .fmt_bitmask = BIT(0),
+ },
+ {
+ .dts_name = "xbgr2101010",
+ .id = XILINX_FRMBUF_FMT_RGBX10,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .v4l2_fmt = V4L2_PIX_FMT_XBGR30,
+ .fmt_bitmask = BIT(1),
+ },
+ {
+ .dts_name = "xrgb8888",
+ .id = XILINX_FRMBUF_FMT_BGRX8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .v4l2_fmt = V4L2_PIX_FMT_XBGR32,
+ .fmt_bitmask = BIT(2),
+ },
+ {
+ .dts_name = "xvuy8888",
+ .id = XILINX_FRMBUF_FMT_YUVX8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XVUY8888,
+ .v4l2_fmt = V4L2_PIX_FMT_XVUY32,
+ .fmt_bitmask = BIT(5),
+ },
+ {
+ .dts_name = "vuy888",
+ .id = XILINX_FRMBUF_FMT_YUV8,
+ .bpw = 24,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_VUY888,
+ .v4l2_fmt = V4L2_PIX_FMT_VUY24,
+ .fmt_bitmask = BIT(6),
+ },
+ {
+ .dts_name = "yuvx2101010",
+ .id = XILINX_FRMBUF_FMT_YUVX10,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_XVUY2101010,
+ .v4l2_fmt = V4L2_PIX_FMT_XVUY10,
+ .fmt_bitmask = BIT(7),
+ },
+ {
+ .dts_name = "yuyv",
+ .id = XILINX_FRMBUF_FMT_YUYV8,
+ .bpw = 32,
+ .ppw = 2,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .v4l2_fmt = V4L2_PIX_FMT_YUYV,
+ .fmt_bitmask = BIT(8),
+ },
+ {
+ .dts_name = "uyvy",
+ .id = XILINX_FRMBUF_FMT_UYVY8,
+ .bpw = 32,
+ .ppw = 2,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .v4l2_fmt = V4L2_PIX_FMT_UYVY,
+ .fmt_bitmask = BIT(9),
+ },
+ {
+ .dts_name = "nv16",
+ .id = XILINX_FRMBUF_FMT_Y_UV8,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_NV16,
+ .v4l2_fmt = V4L2_PIX_FMT_NV16M,
+ .fmt_bitmask = BIT(11),
+ },
+ {
+ .dts_name = "nv16",
+ .id = XILINX_FRMBUF_FMT_Y_UV8,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_NV16,
+ .fmt_bitmask = BIT(11),
+ },
+ {
+ .dts_name = "nv12",
+ .id = XILINX_FRMBUF_FMT_Y_UV8_420,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_NV12,
+ .v4l2_fmt = V4L2_PIX_FMT_NV12M,
+ .fmt_bitmask = BIT(12),
+ },
+ {
+ .dts_name = "nv12",
+ .id = XILINX_FRMBUF_FMT_Y_UV8_420,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_NV12,
+ .fmt_bitmask = BIT(12),
+ },
+ {
+ .dts_name = "xv15",
+ .id = XILINX_FRMBUF_FMT_Y_UV10_420,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_XV15,
+ .v4l2_fmt = V4L2_PIX_FMT_XV15M,
+ .fmt_bitmask = BIT(13),
+ },
+ {
+ .dts_name = "xv15",
+ .id = XILINX_FRMBUF_FMT_Y_UV10_420,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_XV15,
+ .fmt_bitmask = BIT(13),
+ },
+ {
+ .dts_name = "xv20",
+ .id = XILINX_FRMBUF_FMT_Y_UV10,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = DRM_FORMAT_XV20,
+ .v4l2_fmt = V4L2_PIX_FMT_XV20M,
+ .fmt_bitmask = BIT(14),
+ },
+ {
+ .dts_name = "xv20",
+ .id = XILINX_FRMBUF_FMT_Y_UV10,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 2,
+ .drm_fmt = 0,
+ .v4l2_fmt = V4L2_PIX_FMT_XV20,
+ .fmt_bitmask = BIT(14),
+ },
+ {
+ .dts_name = "bgr888",
+ .id = XILINX_FRMBUF_FMT_RGB8,
+ .bpw = 24,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB24,
+ .fmt_bitmask = BIT(15),
+ },
+ {
+ .dts_name = "y8",
+ .id = XILINX_FRMBUF_FMT_Y8,
+ .bpw = 32,
+ .ppw = 4,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_Y8,
+ .v4l2_fmt = V4L2_PIX_FMT_GREY,
+ .fmt_bitmask = BIT(16),
+ },
+ {
+ .dts_name = "y10",
+ .id = XILINX_FRMBUF_FMT_Y10,
+ .bpw = 32,
+ .ppw = 3,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_Y10,
+ .v4l2_fmt = V4L2_PIX_FMT_Y10,
+ .fmt_bitmask = BIT(17),
+ },
+ {
+ .dts_name = "rgb888",
+ .id = XILINX_FRMBUF_FMT_BGR8,
+ .bpw = 24,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR24,
+ .fmt_bitmask = BIT(18),
+ },
+ {
+ .dts_name = "abgr8888",
+ .id = XILINX_FRMBUF_FMT_RGBA8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .v4l2_fmt = 0,
+ .fmt_bitmask = BIT(19),
+ },
+ {
+ .dts_name = "argb8888",
+ .id = XILINX_FRMBUF_FMT_BGRA8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .v4l2_fmt = 0,
+ .fmt_bitmask = BIT(20),
+ },
+ {
+ .dts_name = "avuy8888",
+ .id = XILINX_FRMBUF_FMT_YUVA8,
+ .bpw = 32,
+ .ppw = 1,
+ .num_planes = 1,
+ .drm_fmt = DRM_FORMAT_AVUY,
+ .v4l2_fmt = 0,
+ .fmt_bitmask = BIT(21),
+ },
+ {
+ .dts_name = "xbgr4121212",
+ .id = XILINX_FRMBUF_FMT_RGBX12,
+ .bpw = 40,
+ .ppw = 1,
+ .num_planes = 1,
+ .v4l2_fmt = V4L2_PIX_FMT_XBGR40,
+ .fmt_bitmask = BIT(22),
+ },
+ {
+ .dts_name = "rgb16",
+ .id = XILINX_FRMBUF_FMT_RGB16,
+ .bpw = 48,
+ .ppw = 1,
+ .num_planes = 1,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR48,
+ .fmt_bitmask = BIT(23),
+ },
+};
+
+/**
+ * struct xilinx_frmbuf_feature - dt or IP property structure
+ * @direction: dma transfer mode and direction
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xilinx_frmbuf_feature {
+ enum dma_transfer_direction direction;
+ u32 flags;
+};
+
+/**
+ * struct xilinx_frmbuf_device - dma device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific dma channel
+ * @rst_gpio: GPIO reset
+ * @enabled_vid_fmts: Bitmask of video formats enabled in hardware
+ * @drm_memory_fmts: Array of supported DRM fourcc codes
+ * @drm_fmt_cnt: Count of supported DRM fourcc codes
+ * @v4l2_memory_fmts: Array of supported V4L2 fourcc codes
+ * @v4l2_fmt_cnt: Count of supported V4L2 fourcc codes
+ * @cfg: Pointer to Framebuffer Feature config struct
+ * @max_width: Maximum pixel width supported in IP.
+ * @max_height: Maximum number of lines supported in IP.
+ * @ap_clk: Video core clock
+ */
+struct xilinx_frmbuf_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct xilinx_frmbuf_chan chan;
+ struct gpio_desc *rst_gpio;
+ u32 enabled_vid_fmts;
+ u32 drm_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+ u32 drm_fmt_cnt;
+ u32 v4l2_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+ u32 v4l2_fmt_cnt;
+ const struct xilinx_frmbuf_feature *cfg;
+ u32 max_width;
+ u32 max_height;
+ struct clk *ap_clk;
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbwr_cfg_v20 = {
+ .direction = DMA_DEV_TO_MEM,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbwr_cfg_v21 = {
+ .direction = DMA_DEV_TO_MEM,
+ .flags = XILINX_PPC_PROP | XILINX_FLUSH_PROP
+ | XILINX_FID_PROP | XILINX_CLK_PROP,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbrd_cfg_v20 = {
+ .direction = DMA_MEM_TO_DEV,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbrd_cfg_v21 = {
+ .direction = DMA_MEM_TO_DEV,
+ .flags = XILINX_PPC_PROP | XILINX_FLUSH_PROP
+ | XILINX_FID_PROP | XILINX_CLK_PROP,
+};
+
+static const struct of_device_id xilinx_frmbuf_of_ids[] = {
+ { .compatible = "xlnx,axi-frmbuf-wr-v2",
+ .data = (void *)&xlnx_fbwr_cfg_v20},
+ { .compatible = "xlnx,axi-frmbuf-wr-v2.1",
+ .data = (void *)&xlnx_fbwr_cfg_v21},
+ { .compatible = "xlnx,axi-frmbuf-rd-v2",
+ .data = (void *)&xlnx_fbrd_cfg_v20},
+ { .compatible = "xlnx,axi-frmbuf-rd-v2.1",
+ .data = (void *)&xlnx_fbrd_cfg_v21},
+ {/* end of list */}
+};
+
+/* Macros and register I/O helpers */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_frmbuf_chan, common)
+#define to_dma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_frmbuf_tx_descriptor, async_tx)
+
+static inline u32 frmbuf_read(struct xilinx_frmbuf_chan *chan, u32 reg)
+{
+ return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void frmbuf_write(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u32 value)
+{
+ iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void frmbuf_writeq(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u64 value)
+{
+ iowrite32(lower_32_bits(value), chan->xdev->regs + reg);
+ iowrite32(upper_32_bits(value), chan->xdev->regs + reg + 4);
+}
+
+static void writeq_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ frmbuf_writeq(chan, reg, (u64)addr);
+}
+
+static void write_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ frmbuf_write(chan, reg, addr);
+}
+
+static inline void frmbuf_clr(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u32 clr)
+{
+ frmbuf_write(chan, reg, frmbuf_read(chan, reg) & ~clr);
+}
+
+static inline void frmbuf_set(struct xilinx_frmbuf_chan *chan, u32 reg,
+ u32 set)
+{
+ frmbuf_write(chan, reg, frmbuf_read(chan, reg) | set);
+}
+
+static void frmbuf_init_format_array(struct xilinx_frmbuf_device *xdev)
+{
+ u32 i, cnt;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_frmbuf_formats); i++) {
+ if (!(xdev->enabled_vid_fmts &
+ xilinx_frmbuf_formats[i].fmt_bitmask))
+ continue;
+
+ if (xilinx_frmbuf_formats[i].drm_fmt) {
+ cnt = xdev->drm_fmt_cnt++;
+ xdev->drm_memory_fmts[cnt] =
+ xilinx_frmbuf_formats[i].drm_fmt;
+ }
+
+ if (xilinx_frmbuf_formats[i].v4l2_fmt) {
+ cnt = xdev->v4l2_fmt_cnt++;
+ xdev->v4l2_memory_fmts[cnt] =
+ xilinx_frmbuf_formats[i].v4l2_fmt;
+ }
+ }
+}
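+
+/*
+ * Worked example (hypothetical enable mask): with enabled_vid_fmts =
+ * BIT(8) | BIT(12) (the "yuyv" and "nv12" table entries), the loop above
+ * collects {DRM_FORMAT_YUYV, DRM_FORMAT_NV12} into drm_memory_fmts and
+ * {V4L2_PIX_FMT_YUYV, V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_NV12} into
+ * v4l2_memory_fmts, since the two nv12 entries share BIT(12).
+ */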
+
+static struct xilinx_frmbuf_chan *frmbuf_find_chan(struct dma_chan *chan)
+{
+ struct xilinx_frmbuf_chan *xil_chan;
+ bool found_xchan = false;
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_for_each_entry(xil_chan, &frmbuf_chan_list, chan_node) {
+ if (chan == &xil_chan->common) {
+ found_xchan = true;
+ break;
+ }
+ }
+ mutex_unlock(&frmbuf_chan_list_lock);
+
+ if (!found_xchan) {
+ dev_dbg(chan->device->dev,
+ "dma chan not a Video Framebuffer channel instance\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return xil_chan;
+}
+
+static struct xilinx_frmbuf_device *frmbuf_find_dev(struct dma_chan *chan)
+{
+ struct xilinx_frmbuf_chan *xchan, *temp;
+ struct xilinx_frmbuf_device *xdev;
+ bool is_frmbuf_chan = false;
+
+ list_for_each_entry_safe(xchan, temp, &frmbuf_chan_list, chan_node) {
+ if (chan == &xchan->common)
+ is_frmbuf_chan = true;
+ }
+
+ if (!is_frmbuf_chan)
+ return ERR_PTR(-ENODEV);
+
+ xchan = to_xilinx_chan(chan);
+ xdev = container_of(xchan, struct xilinx_frmbuf_device, chan);
+
+ return xdev;
+}
+
+static int frmbuf_verify_format(struct dma_chan *chan, u32 fourcc, u32 type)
+{
+ struct xilinx_frmbuf_chan *xil_chan = to_xilinx_chan(chan);
+ u32 i, sz = ARRAY_SIZE(xilinx_frmbuf_formats);
+
+ for (i = 0; i < sz; i++) {
+ if ((type == XDMA_DRM &&
+ fourcc != xilinx_frmbuf_formats[i].drm_fmt) ||
+ (type == XDMA_V4L2 &&
+ fourcc != xilinx_frmbuf_formats[i].v4l2_fmt))
+ continue;
+
+ if (!(xilinx_frmbuf_formats[i].fmt_bitmask &
+ xil_chan->xdev->enabled_vid_fmts))
+ return -EINVAL;
+
+ /*
+ * The Alpha color formats are supported in Framebuffer Read
+ * IP only as corresponding DRM formats.
+ */
+ if (type == XDMA_DRM &&
+ (xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_ABGR8888 ||
+ xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_ARGB8888 ||
+ xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_AVUY) &&
+ xil_chan->direction != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ xil_chan->vid_fmt = &xilinx_frmbuf_formats[i];
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void xilinx_xdma_set_config(struct dma_chan *chan, u32 fourcc, u32 type)
+{
+ struct xilinx_frmbuf_chan *xil_chan;
+ int ret;
+
+ xil_chan = frmbuf_find_chan(chan);
+ if (IS_ERR(xil_chan))
+ return;
+ ret = frmbuf_verify_format(chan, fourcc, type);
+ if (ret == -EINVAL) {
+ dev_err(chan->device->dev,
+ "Framebuffer not configured for fourcc 0x%x\n",
+ fourcc);
+ return;
+ }
+}
+
+void xilinx_xdma_set_mode(struct dma_chan *chan, enum operation_mode mode)
+{
+ struct xilinx_frmbuf_chan *xil_chan;
+
+ xil_chan = frmbuf_find_chan(chan);
+ if (IS_ERR(xil_chan))
+ return;
+
+ xil_chan->mode = mode;
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_set_mode);
+
+void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
+{
+ xilinx_xdma_set_config(chan, drm_fourcc, XDMA_DRM);
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_drm_config);
+
+void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc)
+{
+ xilinx_xdma_set_config(chan, v4l2_fourcc, XDMA_V4L2);
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_v4l2_config);
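+
+/*
+ * Usage sketch (illustrative): a DRM client selects the memory format before
+ * preparing any descriptors, e.g.:
+ *
+ *   xilinx_xdma_drm_config(chan, DRM_FORMAT_NV12);
+ *
+ * A V4L2 client calls xilinx_xdma_v4l2_config() with the matching V4L2
+ * fourcc instead. An unsupported fourcc only logs an error here, so callers
+ * are expected to pick from the lists reported by the helpers below.
+ */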
+
+int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts)
+{
+ struct xilinx_frmbuf_device *xdev;
+
+ xdev = frmbuf_find_dev(chan);
+
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ *fmt_cnt = xdev->drm_fmt_cnt;
+ *fmts = xdev->drm_memory_fmts;
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_drm_vid_fmts);
+
+int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts)
+{
+ struct xilinx_frmbuf_device *xdev;
+
+ xdev = frmbuf_find_dev(chan);
+
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ *fmt_cnt = xdev->v4l2_fmt_cnt;
+ *fmts = xdev->v4l2_memory_fmts;
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_v4l2_vid_fmts);
+
+int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 *fid)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ if (!async_tx || !fid)
+ return -EINVAL;
+
+ if (xdev->chan.direction != DMA_DEV_TO_MEM)
+ return -EINVAL;
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ *fid = desc->fid;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_fid);
+
+int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 fid)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ if (fid > 1 || !async_tx)
+ return -EINVAL;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ if (xdev->chan.direction != DMA_MEM_TO_DEV)
+ return -EINVAL;
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ desc->fid = fid;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_set_fid);
+
+int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *earlycb)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ if (!async_tx || !earlycb)
+ return -EINVAL;
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ *earlycb = desc->earlycb;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_earlycb);
+
+int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 earlycb)
+{
+ struct xilinx_frmbuf_device *xdev;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ if (!async_tx)
+ return -EINVAL;
+
+ xdev = frmbuf_find_dev(chan);
+ if (IS_ERR(xdev))
+ return PTR_ERR(xdev);
+
+ desc = to_dma_tx_descriptor(async_tx);
+ if (!desc)
+ return -EINVAL;
+
+ desc->earlycb = earlycb;
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_set_earlycb);
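+
+/*
+ * Usage sketch (illustrative; the descriptor variables are hypothetical):
+ * a latency-sensitive client can request its completion callback before the
+ * frame fully completes:
+ *
+ *   tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
+ *   xilinx_xdma_set_earlycb(chan, tx, EARLY_CALLBACK);
+ *   dmaengine_submit(tx);
+ *   dma_async_issue_pending(chan);
+ */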
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success or error code on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_frmbuf_device *xdev = ofdma->of_dma_data;
+
+ return dma_get_slave_channel(&xdev->chan.common);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors alloc and free
+ */
+
+/**
+ * xilinx_frmbuf_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific dma channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_frmbuf_tx_descriptor *
+xilinx_frmbuf_alloc_tx_descriptor(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ return desc;
+}
+
+/**
+ * xilinx_frmbuf_free_desc_list - Free descriptors list
+ * @chan: Driver specific dma channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_frmbuf_free_desc_list(struct xilinx_frmbuf_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+}
+
+/**
+ * xilinx_frmbuf_free_descriptors - Free channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_free_descriptors(struct xilinx_frmbuf_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_frmbuf_free_desc_list(chan, &chan->pending_list);
+ xilinx_frmbuf_free_desc_list(chan, &chan->done_list);
+ kfree(chan->active_desc);
+ kfree(chan->staged_desc);
+
+ chan->staged_desc = NULL;
+ chan->active_desc = NULL;
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_frmbuf_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_frmbuf_free_descriptors(chan);
+}
+
+/**
+ * xilinx_frmbuf_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_chan_desc_cleanup(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ kfree(desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx frmbuf channel structure
+ */
+static void xilinx_frmbuf_do_tasklet(unsigned long data)
+{
+ struct xilinx_frmbuf_chan *chan = (struct xilinx_frmbuf_chan *)data;
+
+ xilinx_frmbuf_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_frmbuf_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_alloc_chan_resources(struct dma_chan *dchan)
+{
+ dma_cookie_init(dchan);
+
+ return 0;
+}
+
+/**
+ * xilinx_frmbuf_tx_status - Get frmbuf transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: frmbuf transaction status
+ */
+static enum dma_status xilinx_frmbuf_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xilinx_frmbuf_halt - Halt frmbuf channel
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_halt(struct xilinx_frmbuf_chan *chan)
+{
+ frmbuf_clr(chan, XILINX_FRMBUF_CTRL_OFFSET,
+ XILINX_FRMBUF_CTRL_AP_START |
+ chan->mode);
+ chan->idle = true;
+}
+
+/**
+ * xilinx_frmbuf_start - Start dma channel
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_start(struct xilinx_frmbuf_chan *chan)
+{
+ frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
+ XILINX_FRMBUF_CTRL_AP_START |
+ chan->mode);
+ chan->idle = false;
+}
+
+/**
+ * xilinx_frmbuf_complete_descriptor - Mark the active descriptor as complete
+ * @chan: xilinx frmbuf channel
+ *
+ * This function is invoked with the channel spinlock held.
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_frmbuf_complete_descriptor(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc = chan->active_desc;
+
+ /*
+ * In case of frame buffer write, read the fid register
+ * and associate it with descriptor
+ */
+ if (chan->direction == DMA_DEV_TO_MEM && chan->hw_fid)
+ desc->fid = frmbuf_read(chan, XILINX_FRMBUF_FID_OFFSET) &
+ XILINX_FRMBUF_FID_MASK;
+
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * xilinx_frmbuf_start_transfer - Starts frmbuf transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_frmbuf_start_transfer(struct xilinx_frmbuf_chan *chan)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ if (!chan->idle)
+ return;
+
+ if (chan->staged_desc) {
+ chan->active_desc = chan->staged_desc;
+ chan->staged_desc = NULL;
+ }
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xilinx_frmbuf_tx_descriptor,
+ node);
+
+ if (desc->earlycb == EARLY_CALLBACK_LOW_LATENCY) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ callback(callback_param);
+ desc->async_tx.callback = NULL;
+ chan->active_desc = desc;
+ }
+ }
+
+ /* Start the transfer */
+ chan->write_addr(chan, XILINX_FRMBUF_ADDR_OFFSET,
+ desc->hw.luma_plane_addr);
+ chan->write_addr(chan, XILINX_FRMBUF_ADDR2_OFFSET,
+ desc->hw.chroma_plane_addr);
+
+ /* HW expects these parameters to be the same for one transaction */
+ frmbuf_write(chan, XILINX_FRMBUF_WIDTH_OFFSET, desc->hw.hsize);
+ frmbuf_write(chan, XILINX_FRMBUF_STRIDE_OFFSET, desc->hw.stride);
+ frmbuf_write(chan, XILINX_FRMBUF_HEIGHT_OFFSET, desc->hw.vsize);
+ frmbuf_write(chan, XILINX_FRMBUF_FMT_OFFSET, chan->vid_fmt->id);
+
+ /* If it is the framebuffer read IP, set the FID */
+ if (chan->direction == DMA_MEM_TO_DEV && chan->hw_fid)
+ frmbuf_write(chan, XILINX_FRMBUF_FID_OFFSET, desc->fid);
+
+ /* Start the hardware */
+ xilinx_frmbuf_start(chan);
+ list_del(&desc->node);
+
+ /* No staging descriptor required when auto restart is disabled */
+ if (chan->mode == AUTO_RESTART)
+ chan->staged_desc = desc;
+ else
+ chan->active_desc = desc;
+}
+
+/**
+ * xilinx_frmbuf_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_frmbuf_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ xilinx_frmbuf_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_reset - Reset frmbuf channel
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_reset(struct xilinx_frmbuf_chan *chan)
+{
+ /* reset ip */
+ gpiod_set_value(chan->xdev->rst_gpio, 1);
+ udelay(1);
+ gpiod_set_value(chan->xdev->rst_gpio, 0);
+}
+
+/**
+ * xilinx_frmbuf_chan_reset - Reset frmbuf channel and enable interrupts
+ * @chan: Driver specific frmbuf channel
+ */
+static void xilinx_frmbuf_chan_reset(struct xilinx_frmbuf_chan *chan)
+{
+ xilinx_frmbuf_reset(chan);
+ frmbuf_write(chan, XILINX_FRMBUF_IE_OFFSET, XILINX_FRMBUF_IE_AP_READY);
+ frmbuf_write(chan, XILINX_FRMBUF_GIE_OFFSET, XILINX_FRMBUF_GIE_EN);
+}
+
+/**
+ * xilinx_frmbuf_irq_handler - frmbuf Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx frmbuf channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_frmbuf_irq_handler(int irq, void *data)
+{
+ struct xilinx_frmbuf_chan *chan = data;
+ u32 status;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param;
+ struct xilinx_frmbuf_tx_descriptor *desc;
+
+ status = frmbuf_read(chan, XILINX_FRMBUF_ISR_OFFSET);
+ if (!(status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK))
+ return IRQ_NONE;
+
+ frmbuf_write(chan, XILINX_FRMBUF_ISR_OFFSET,
+ status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
+
+ /* Check if callback function needs to be called early */
+ desc = chan->staged_desc;
+ if (desc && desc->earlycb == EARLY_CALLBACK) {
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ callback(callback_param);
+ desc->async_tx.callback = NULL;
+ }
+ }
+
+ if (status & XILINX_FRMBUF_ISR_AP_READY_IRQ) {
+ spin_lock(&chan->lock);
+ chan->idle = true;
+ if (chan->active_desc) {
+ xilinx_frmbuf_complete_descriptor(chan);
+ chan->active_desc = NULL;
+ }
+ xilinx_frmbuf_start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_frmbuf_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_frmbuf_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_frmbuf_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &chan->pending_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_frmbuf_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_frmbuf_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_frmbuf_tx_descriptor *desc;
+ struct xilinx_frmbuf_desc_hw *hw;
+ u32 vsize, hsize;
+
+ if (chan->direction != xt->dir || !chan->vid_fmt)
+ goto error;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ goto error;
+
+ if (xt->frame_size != chan->vid_fmt->num_planes)
+ goto error;
+
+ vsize = xt->numf;
+ hsize = (xt->sgl[0].size * chan->vid_fmt->ppw * 8) /
+ chan->vid_fmt->bpw;
+ /* hsize should be even; round an odd result up to the next even value */
+ if (hsize & 1)
+ hsize++;
+
+ if (vsize > chan->xdev->max_height || hsize > chan->xdev->max_width) {
+ dev_dbg(chan->xdev->dev,
+ "vsize %d max vsize %d hsize %d max hsize %d\n",
+ vsize, chan->xdev->max_height, hsize,
+ chan->xdev->max_width);
+ dev_err(chan->xdev->dev, "Requested size not supported!\n");
+ goto error;
+ }
+
+ desc = xilinx_frmbuf_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_frmbuf_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ hw = &desc->hw;
+ hw->vsize = xt->numf;
+ hw->stride = xt->sgl[0].icg + xt->sgl[0].size;
+ hw->hsize = (xt->sgl[0].size * chan->vid_fmt->ppw * 8) /
+ chan->vid_fmt->bpw;
+
+ /* hsize should be even; round an odd result up to the next even value */
+ if (hw->hsize & 1)
+ hw->hsize++;
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ hw->luma_plane_addr = xt->src_start;
+ if (xt->frame_size == 2)
+ hw->chroma_plane_addr =
+ xt->src_start +
+ xt->numf * hw->stride +
+ xt->sgl[0].src_icg;
+ } else {
+ hw->luma_plane_addr = xt->dst_start;
+ if (xt->frame_size == 2)
+ hw->chroma_plane_addr =
+ xt->dst_start +
+ xt->numf * hw->stride +
+ xt->sgl[0].dst_icg;
+ }
+
+ return &desc->async_tx;
+
+error:
+ dev_err(chan->xdev->dev,
+ "Invalid dma template or missing dma video fmt config\n");
+ return NULL;
+}
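+
+/*
+ * Worked example (hypothetical geometry): a 1920x1080 NV12 buffer uses the
+ * luma-plane parameters ppw = 4 and bpw = 32 from the format table, so with
+ * sgl[0].size = 1920, icg = 0, numf = 1080 and frame_size = 2:
+ *
+ *   hsize  = (1920 * 4 * 8) / 32 = 1920 pixels
+ *   stride = 0 + 1920 = 1920 bytes
+ *   chroma_plane_addr = src_start + 1080 * 1920 + sgl[0].src_icg
+ */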
+
+/**
+ * xilinx_frmbuf_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific dma channel pointer
+ *
+ * Return: 0
+ */
+static int xilinx_frmbuf_terminate_all(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+ xilinx_frmbuf_halt(chan);
+ xilinx_frmbuf_free_descriptors(chan);
+ /* worst case frame-to-frame boundary; ensure frame output complete */
+ msleep(50);
+
+ if (chan->xdev->cfg->flags & XILINX_FLUSH_PROP) {
+ u8 count;
+
+ /*
+ * Flush the framebuffer FIFO and
+ * wait for max 50ms for flush done
+ */
+ frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
+ XILINX_FRMBUF_CTRL_FLUSH);
+ for (count = WAIT_FOR_FLUSH_DONE; count > 0; count--) {
+ if (frmbuf_read(chan, XILINX_FRMBUF_CTRL_OFFSET) &
+ XILINX_FRMBUF_CTRL_FLUSH_DONE)
+ break;
+ usleep_range(2000, 2100);
+ }
+
+ if (!count)
+ dev_err(chan->xdev->dev, "Framebuffer Flush not done!\n");
+ }
+
+ xilinx_frmbuf_chan_reset(chan);
+
+ return 0;
+}
+
+/**
+ * xilinx_frmbuf_synchronize - kill tasklet to stop further descr processing
+ * @dchan: Driver specific dma channel pointer
+ */
+static void xilinx_frmbuf_synchronize(struct dma_chan *dchan)
+{
+ struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_frmbuf_chan_remove - Per Channel remove function
+ * @chan: Driver specific dma channel
+ */
+static void xilinx_frmbuf_chan_remove(struct xilinx_frmbuf_chan *chan)
+{
+ /* Disable all interrupts */
+ frmbuf_clr(chan, XILINX_FRMBUF_IE_OFFSET,
+ XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
+
+ tasklet_kill(&chan->tasklet);
+ list_del(&chan->common.device_node);
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_del(&chan->chan_node);
+ mutex_unlock(&frmbuf_chan_list_lock);
+}
+
+/**
+ * xilinx_frmbuf_chan_probe - Per Channel Probing
+ * It gets channel features from the device tree entry and initializes the
+ * special channel handling routines.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_chan_probe(struct xilinx_frmbuf_device *xdev,
+ struct device_node *node)
+{
+ struct xilinx_frmbuf_chan *chan;
+ int err;
+ u32 dma_addr_size;
+
+ chan = &xdev->chan;
+
+ chan->dev = xdev->dev;
+ chan->xdev = xdev;
+ chan->idle = true;
+ chan->mode = AUTO_RESTART;
+
+ err = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &dma_addr_size);
+ if (err || (dma_addr_size != 32 && dma_addr_size != 64)) {
+ dev_err(xdev->dev, "missing or invalid addr width dts prop\n");
+ return err;
+ }
+
+ if (dma_addr_size == 64 && sizeof(dma_addr_t) == sizeof(u64))
+ chan->write_addr = writeq_addr;
+ else
+ chan->write_addr = write_addr;
+
+ if (xdev->cfg->flags & XILINX_FID_PROP)
+ chan->hw_fid = of_property_read_bool(node, "xlnx,fid");
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ chan->irq = irq_of_parse_and_map(node, 0);
+ err = devm_request_irq(xdev->dev, chan->irq, xilinx_frmbuf_irq_handler,
+ IRQF_SHARED, "xilinx_framebuffer", chan);
+
+ if (err) {
+ dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+ return err;
+ }
+
+ tasklet_init(&chan->tasklet, xilinx_frmbuf_do_tasklet,
+ (unsigned long)chan);
+
+ /*
+ * Initialize the DMA channel and add it to the DMA engine channels
+ * list.
+ */
+ chan->common.device = &xdev->common;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+
+ mutex_lock(&frmbuf_chan_list_lock);
+ list_add_tail(&chan->chan_node, &frmbuf_chan_list);
+ mutex_unlock(&frmbuf_chan_list_lock);
+
+ xilinx_frmbuf_chan_reset(chan);
+
+ return 0;
+}
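+
+/*
+ * Device tree sketch (all values hypothetical; interrupt and clock phandles
+ * omitted for brevity) combining the per-channel properties parsed above
+ * with those read in probe below:
+ *
+ *   v_frmbuf_rd@a0000000 {
+ *       compatible = "xlnx,axi-frmbuf-rd-v2.1";
+ *       clock-names = "ap_clk";
+ *       reset-gpios = <&gpio 79 1>;
+ *       xlnx,dma-addr-width = <64>;
+ *       xlnx,fid;
+ *       xlnx,max-height = <1080>;
+ *       xlnx,max-width = <1920>;
+ *       xlnx,pixels-per-clock = <2>;
+ *       xlnx,vid-formats = "nv12", "yuyv";
+ *   };
+ */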
+
+/**
+ * xilinx_frmbuf_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_frmbuf_device *xdev;
+ struct resource *io;
+ enum dma_transfer_direction dma_dir;
+ const struct of_device_id *match;
+ int err;
+ u32 i, j, align, ppc;
+ int hw_vid_fmt_cnt;
+ const char *vid_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+
+ match = of_match_node(xilinx_frmbuf_of_ids, node);
+ if (!match)
+ return -ENODEV;
+
+ xdev->cfg = match->data;
+
+ dma_dir = (enum dma_transfer_direction)xdev->cfg->direction;
+
+ if (xdev->cfg->flags & XILINX_CLK_PROP) {
+ xdev->ap_clk = devm_clk_get(xdev->dev, "ap_clk");
+ if (IS_ERR(xdev->ap_clk)) {
+ err = PTR_ERR(xdev->ap_clk);
+ dev_err(xdev->dev, "failed to get ap_clk (%d)\n", err);
+ return err;
+ }
+ } else {
+ dev_info(xdev->dev, "assuming clock is enabled!\n");
+ }
+
+ xdev->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xdev->rst_gpio)) {
+ err = PTR_ERR(xdev->rst_gpio);
+ if (err == -EPROBE_DEFER)
+ dev_info(&pdev->dev,
+ "Probe deferred due to GPIO reset defer\n");
+ else
+ dev_err(&pdev->dev,
+ "Unable to locate reset property in dt\n");
+ return err;
+ }
+
+ gpiod_set_value_cansleep(xdev->rst_gpio, 0x0);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(xdev->regs))
+ return PTR_ERR(xdev->regs);
+
+ err = of_property_read_u32(node, "xlnx,max-height", &xdev->max_height);
+ if (err < 0) {
+ dev_err(xdev->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xdev->max_height > XILINX_FRMBUF_MAX_HEIGHT ||
+ xdev->max_height < XILINX_FRMBUF_MIN_HEIGHT) {
+ dev_err(&pdev->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(node, "xlnx,max-width", &xdev->max_width);
+ if (err < 0) {
+ dev_err(xdev->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xdev->max_width > XILINX_FRMBUF_MAX_WIDTH ||
+ xdev->max_width < XILINX_FRMBUF_MIN_WIDTH) {
+ dev_err(&pdev->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ /* Initialize the DMA engine */
+ if (xdev->cfg->flags & XILINX_PPC_PROP) {
+ err = of_property_read_u32(node, "xlnx,pixels-per-clock", &ppc);
+ if (err || (ppc != 1 && ppc != 2 && ppc != 4)) {
+ dev_err(&pdev->dev, "missing or invalid pixels per clock dts prop\n");
+ return err;
+ }
+
+ err = of_property_read_u32(node, "xlnx,dma-align", &align);
+ if (err)
+ align = ppc * XILINX_FRMBUF_ALIGN_MUL;
+
+ if (align < (ppc * XILINX_FRMBUF_ALIGN_MUL) ||
+ ffs(align) != fls(align)) {
+ dev_err(&pdev->dev, "invalid dma align dts prop\n");
+ return -EINVAL;
+ }
+ } else {
+ align = 16;
+ }
+
+ xdev->common.copy_align = fls(align) - 1;
+ xdev->common.dev = &pdev->dev;
+
+ if (xdev->cfg->flags & XILINX_CLK_PROP) {
+ err = clk_prepare_enable(xdev->ap_clk);
+ if (err) {
+ dev_err(&pdev->dev, " failed to enable ap_clk (%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+
+ /* Initialize the channels */
+ err = xilinx_frmbuf_chan_probe(xdev, node);
+ if (err < 0)
+ goto disable_clk;
+
+ xdev->chan.direction = dma_dir;
+
+ if (xdev->chan.direction == DMA_DEV_TO_MEM) {
+ xdev->common.directions = BIT(DMA_DEV_TO_MEM);
+ dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_DEV_TO_MEM\n");
+ } else if (xdev->chan.direction == DMA_MEM_TO_DEV) {
+ xdev->common.directions = BIT(DMA_MEM_TO_DEV);
+ dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_MEM_TO_DEV\n");
+ } else {
+ err = -EINVAL;
+ goto remove_chan;
+ }
+
+ /* read supported video formats and update internal table */
+ hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
+ if (hw_vid_fmt_cnt < 0) {
+ err = hw_vid_fmt_cnt;
+ dev_err(&pdev->dev,
+ "Missing or invalid xlnx,vid-formats dts prop\n");
+ goto remove_chan;
+ }
+
+ err = of_property_read_string_array(node, "xlnx,vid-formats",
+ vid_fmts, hw_vid_fmt_cnt);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Missing or invalid xlnx,vid-formats dts prop\n");
+ goto remove_chan;
+ }
+
+ for (i = 0; i < hw_vid_fmt_cnt; i++) {
+ const char *vid_fmt_name = vid_fmts[i];
+
+ for (j = 0; j < ARRAY_SIZE(xilinx_frmbuf_formats); j++) {
+ const char *dts_name =
+ xilinx_frmbuf_formats[j].dts_name;
+
+ if (strcmp(vid_fmt_name, dts_name))
+ continue;
+
+ xdev->enabled_vid_fmts |=
+ xilinx_frmbuf_formats[j].fmt_bitmask;
+ }
+ }
+
+ /* Determine supported vid framework formats */
+ frmbuf_init_format_array(xdev);
+
+ xdev->common.device_alloc_chan_resources =
+ xilinx_frmbuf_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xilinx_frmbuf_free_chan_resources;
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_frmbuf_dma_prep_interleaved;
+ xdev->common.device_terminate_all = xilinx_frmbuf_terminate_all;
+ xdev->common.device_synchronize = xilinx_frmbuf_synchronize;
+ xdev->common.device_tx_status = xilinx_frmbuf_tx_status;
+ xdev->common.device_issue_pending = xilinx_frmbuf_issue_pending;
+
+ platform_set_drvdata(pdev, xdev);
+
+ /* Register the DMA engine with the core */
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register DMA device\n");
+ goto remove_chan;
+ }
+
+ err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "Xilinx AXI FrameBuffer Engine Driver Probed!!\n");
+
+ return 0;
+error:
+ dma_async_device_unregister(&xdev->common);
+remove_chan:
+ xilinx_frmbuf_chan_remove(&xdev->chan);
+disable_clk:
+ clk_disable_unprepare(xdev->ap_clk);
+ return err;
+}
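+
+/*
+ * Illustrative device tree node for the properties parsed above (a sketch
+ * only; the compatible string and cell values are assumptions, see
+ * Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt for the
+ * authoritative binding):
+ *
+ * v_frmbuf_wr_0: v_frmbuf_wr@a0000000 {
+ * compatible = "xlnx,axi-frmbuf-wr-v2.1";
+ * reg = <0x0 0xa0000000 0x0 0x10000>;
+ * interrupts = <0 89 4>;
+ * reset-gpios = <&gpio 78 1>;
+ * xlnx,dma-addr-width = <32>;
+ * xlnx,pixels-per-clock = <2>;
+ * xlnx,max-width = <3840>;
+ * xlnx,max-height = <2160>;
+ * xlnx,vid-formats = "bgr888", "yuyv";
+ * };
+ */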
+
+/**
+ * xilinx_frmbuf_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_frmbuf_remove(struct platform_device *pdev)
+{
+ struct xilinx_frmbuf_device *xdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&xdev->common);
+ xilinx_frmbuf_chan_remove(&xdev->chan);
+ clk_disable_unprepare(xdev->ap_clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, xilinx_frmbuf_of_ids);
+
+static struct platform_driver xilinx_frmbuf_driver = {
+ .driver = {
+ .name = "xilinx-frmbuf",
+ .of_match_table = xilinx_frmbuf_of_ids,
+ },
+ .probe = xilinx_frmbuf_probe,
+ .remove = xilinx_frmbuf_remove,
+};
+
+module_platform_driver(xilinx_frmbuf_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Framebuffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie.h b/drivers/dma/xilinx/xilinx_ps_pcie.h
new file mode 100644
index 000000000000..81d634d15447
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie.h
@@ -0,0 +1,44 @@
+/*
+ * Xilinx PS PCIe DMA Engine platform header file
+ *
+ * Copyright (C) 2010-2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __XILINX_PS_PCIE_H
+#define __XILINX_PS_PCIE_H
+
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/dma/xilinx_ps_pcie_dma.h>
+
+/**
+ * dma_platform_driver_register - This will be invoked by module init
+ *
+ * Return: status of platform_driver_register
+ */
+int dma_platform_driver_register(void);
+
+/**
+ * dma_platform_driver_unregister - This will be invoked by module exit
+ *
+ * Return: void, after unregistering the platform driver
+ */
+void dma_platform_driver_unregister(void);
+
+#endif
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c b/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c
new file mode 100644
index 000000000000..2996133837f0
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c
@@ -0,0 +1,1402 @@
+/*
+ * XILINX PS PCIe DMA Engine test module
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cdev.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/dma/xilinx_ps_pcie_dma.h>
+
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma_client"
+
+#define DMA_SCRATCH0_REG_OFFSET (0x50)
+#define DMA_SCRATCH1_REG_OFFSET (0x54)
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74)
+
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define DMA_BAR_NUMBER 0
+
+#define CHAR_DRIVER_NAME "ps_pcie_dmachan"
+
+#define PIO_CHAR_DRIVER_NAME "ps_pcie_pio"
+#define EP_TRANSLATION_CHECK 0xCCCCCCCC
+
+#define PIO_MEMORY_BAR_NUMBER 2
+
+#define XPIO_CLIENT_MAGIC 'P'
+#define IOCTL_EP_CHECK_TRANSLATION _IO(XPIO_CLIENT_MAGIC, 0x01)
+
+#define XPS_PCIE_DMA_CLIENT_MAGIC 'S'
+
+#define IGET_ASYNC_TRANSFERINFO _IO(XPS_PCIE_DMA_CLIENT_MAGIC, 0x01)
+#define ISET_ASYNC_TRANSFERINFO _IO(XPS_PCIE_DMA_CLIENT_MAGIC, 0x02)
+
+#define DMA_TRANSACTION_SUCCESSFUL 1
+#define DMA_TRANSACTION_FAILURE 0
+
+#define MAX_LIST 1024
+
+struct dma_transfer_info {
+ char __user *buff_address;
+ unsigned int buff_size;
+ loff_t offset;
+ enum dma_data_direction direction;
+};
+
+struct buff_info {
+ bool status;
+ unsigned int buff_size;
+ char __user *buff_address;
+};
+
+struct usrbuff_info {
+ struct buff_info buff_list[MAX_LIST];
+ unsigned int expected;
+};
+
+enum pio_status {
+ PIO_SUPPORTED = 0,
+ PIO_NOT_SUPPORTED
+};
+
+enum dma_transfer_mode {
+ MEMORY_MAPPED = 0,
+ STREAMING
+};
+
+struct dma_deviceproperties {
+ u16 pci_vendorid;
+ u16 pci_deviceid;
+ u16 board_number;
+ enum pio_status pio_transfers;
+ enum dma_transfer_mode mode;
+ enum dma_data_direction direction[MAX_ALLOWED_CHANNELS_IN_HW];
+};
+
+struct xlnx_completed_info {
+ struct list_head clist;
+ struct buff_info buffer;
+};
+
+struct xlnx_ps_pcie_dma_client_channel {
+ struct device *dev;
+ struct dma_chan *chan;
+ struct ps_pcie_dma_channel_match match;
+ enum dma_data_direction direction;
+ enum dma_transfer_mode mode;
+ struct xlnx_completed_info completed;
+ spinlock_t channel_lock; /* Lock to serialize transfers on channel */
+};
+
+struct xlnx_ps_pcie_dma_client_device {
+ struct dma_deviceproperties *properties;
+
+ struct xlnx_ps_pcie_dma_client_channel
+ pcie_dma_chan[MAX_ALLOWED_CHANNELS_IN_HW];
+
+ dev_t char_device;
+ struct cdev xps_pcie_chardev;
+ struct device *chardev[MAX_ALLOWED_CHANNELS_IN_HW];
+
+ dev_t pio_char_device;
+ struct cdev xpio_char_dev;
+ struct device *xpio_char_device;
+ struct mutex pio_chardev_mutex; /* Exclusive access to ioctl */
+ struct completion trans_cmpltn;
+ u32 pio_translation_size;
+
+ struct list_head dev_node;
+};
+
+struct xlnx_ps_pcie_dma_asynchronous_transaction {
+ dma_cookie_t cookie;
+ struct page **cache_pages;
+ unsigned int num_pages;
+ struct sg_table *sg;
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ struct xlnx_completed_info *buffer_info;
+ struct dma_async_tx_descriptor **txd;
+};
+
+static struct class *g_ps_pcie_dma_client_class; /* global device class */
+static struct list_head g_ps_pcie_dma_client_list;
+
+/*
+ * Keep adding to this list to interact with multiple DMA devices
+ */
+static struct dma_deviceproperties g_dma_deviceproperties_list[] = {
+ {
+ .pci_vendorid = PCI_VENDOR_ID_XILINX,
+ .pci_deviceid = ZYNQMP_DMA_DEVID,
+ .board_number = 0,
+ .pio_transfers = PIO_SUPPORTED,
+ .mode = MEMORY_MAPPED,
+ /* Make sure the channel direction is same
+ * as what is configured in DMA device
+ */
+ .direction = {DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ DMA_TO_DEVICE, DMA_FROM_DEVICE}
+ }
+};
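+
+/*
+ * Illustrative only: a second board with the same device id could be
+ * supported by appending an entry such as the following to the list
+ * above (the field values are assumptions):
+ *
+ * {
+ * .pci_vendorid = PCI_VENDOR_ID_XILINX,
+ * .pci_deviceid = ZYNQMP_DMA_DEVID,
+ * .board_number = 1,
+ * .pio_transfers = PIO_NOT_SUPPORTED,
+ * .mode = STREAMING,
+ * .direction = {DMA_TO_DEVICE, DMA_FROM_DEVICE,
+ * DMA_TO_DEVICE, DMA_FROM_DEVICE}
+ * }
+ */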
+
+/**
+ * ps_pcie_dma_sync_transfer_cbk - Callback handler for Synchronous transfers.
+ * Handles both S2C and C2S transfer callbacks.
+ * Indicates to blocked applications that DMA transfers are complete
+ *
+ * @data: Callback parameter
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_sync_transfer_cbk(void *data)
+{
+ struct completion *compl = (struct completion *)data;
+
+ if (compl)
+ complete(compl);
+}
+
+/**
+ * initiate_sync_transfer - Programs both Source Q
+ * and Destination Q of channel after setting up sg lists and transaction
+ * specific data. This function waits until transaction completion is notified
+ *
+ * @channel: Pointer to the PS PCIe DMA channel structure
+ * @buffer: User land virtual address containing data to be sent or received
+ * @length: Length of user land buffer
+ * @f_offset: AXI domain address to/from which the data pointed to by the
+ * user buffer has to be sent/received
+ * @direction: Direction of the data transfer
+ *
+ * Return: Number of bytes transferred on success and error value on failure
+ */
+static ssize_t initiate_sync_transfer(
+ struct xlnx_ps_pcie_dma_client_channel *channel,
+ const char __user *buffer, size_t length,
+ loff_t *f_offset, enum dma_data_direction direction)
+{
+ int offset;
+ unsigned int alloc_pages;
+ unsigned long first, last, nents = 1;
+ struct page **cache_pages;
+ struct dma_chan *chan = NULL;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor **txd = NULL;
+ dma_cookie_t cookie = 0;
+ enum dma_ctrl_flags flags = 0;
+ int err;
+ struct sg_table *sg;
+ enum dma_transfer_direction d_direction;
+ int i;
+ struct completion *cmpl_ptr;
+ enum dma_status status;
+ struct scatterlist *selem;
+ size_t elem_len = 0;
+
+ chan = channel->chan;
+ device = chan->device;
+
+ offset = offset_in_page(buffer);
+ first = ((unsigned long)buffer & PAGE_MASK) >> PAGE_SHIFT;
+ last = (((unsigned long)buffer + length - 1) & PAGE_MASK) >>
+ PAGE_SHIFT;
+ alloc_pages = (last - first) + 1;
+
+ cache_pages = devm_kzalloc(channel->dev,
+ (alloc_pages * (sizeof(struct page *))),
+ GFP_ATOMIC);
+ if (!cache_pages) {
+ dev_err(channel->dev,
+ "Unable to allocate memory for page table holder\n");
+ err = -ENOMEM;
+ goto err_out_cachepages_alloc;
+ }
+
+ err = get_user_pages_fast((unsigned long)buffer, alloc_pages,
+ !(direction), cache_pages);
+ if (err <= 0) {
+ dev_err(channel->dev, "Unable to pin user pages\n");
+ if (err == 0)
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ } else if (err < alloc_pages) {
+ dev_err(channel->dev, "Pinned only %d of %u user pages\n",
+ err, alloc_pages);
+ for (i = 0; i < err; i++)
+ put_page(cache_pages[i]);
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ }
+
+ sg = devm_kzalloc(channel->dev, sizeof(struct sg_table), GFP_ATOMIC);
+ if (!sg) {
+ err = -ENOMEM;
+ goto err_out_alloc_sg_table;
+ }
+
+ err = sg_alloc_table_from_pages(sg, cache_pages, alloc_pages, offset,
+ length, GFP_ATOMIC);
+ if (err < 0) {
+ dev_err(channel->dev, "Unable to create sg table\n");
+ goto err_out_sg_to_sgl;
+ }
+
+ err = dma_map_sg(channel->dev, sg->sgl, sg->nents, direction);
+ if (err == 0) {
+ dev_err(channel->dev, "Unable to map buffer to sg table\n");
+ err = -EIO;
+ goto err_out_dma_map_sg;
+ }
+
+ cmpl_ptr = devm_kzalloc(channel->dev, sizeof(struct completion),
+ GFP_ATOMIC);
+ if (!cmpl_ptr) {
+ err = -ENOMEM;
+ goto err_out_cmpl_ptr;
+ }
+
+ init_completion(cmpl_ptr);
+
+ if (channel->mode == MEMORY_MAPPED)
+ nents = sg->nents;
+
+ txd = devm_kzalloc(channel->dev, sizeof(*txd) * nents,
+ GFP_ATOMIC);
+ if (!txd) {
+ err = -ENOMEM;
+ goto err_out_cmpl_ptr;
+ }
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0, selem = (sg->sgl); i < sg->nents; i++,
+ selem = sg_next(selem)) {
+ if ((i + 1) == sg->nents)
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+ if (direction == DMA_TO_DEVICE) {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ (dma_addr_t)(*f_offset) + elem_len,
+ selem->dma_address, selem->length,
+ flags);
+ } else {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ selem->dma_address,
+ (dma_addr_t)(*f_offset) + elem_len,
+ selem->length, flags);
+ }
+
+ elem_len += selem->length;
+
+ if (!txd[i]) {
+ err = -EAGAIN;
+ goto err_out_no_prep_sg_async_desc;
+ }
+ }
+ } else {
+ if (direction == DMA_TO_DEVICE)
+ d_direction = DMA_MEM_TO_DEV;
+ else
+ d_direction = DMA_DEV_TO_MEM;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ txd[0] = device->device_prep_slave_sg(chan, sg->sgl, sg->nents,
+ d_direction, flags, NULL);
+ if (!txd[0]) {
+ err = -EAGAIN;
+ goto err_out_no_slave_sg_async_descriptor;
+ }
+ }
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0; i < sg->nents; i++) {
+ if ((i + 1) == sg->nents) {
+ txd[i]->callback =
+ ps_pcie_dma_sync_transfer_cbk;
+ txd[i]->callback_param = cmpl_ptr;
+ }
+
+ cookie = txd[i]->tx_submit(txd[i]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+ }
+ } else {
+ txd[0]->callback = ps_pcie_dma_sync_transfer_cbk;
+ txd[0]->callback_param = cmpl_ptr;
+
+ cookie = txd[0]->tx_submit(txd[0]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+ }
+
+ dma_async_issue_pending(chan);
+
+ wait_for_completion_killable(cmpl_ptr);
+
+ status = dmaengine_tx_status(chan, cookie, NULL);
+ if (status == DMA_COMPLETE)
+ err = length;
+ else
+ err = -EIO;
+
+ dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+ devm_kfree(channel->dev, cmpl_ptr);
+ devm_kfree(channel->dev, txd);
+ sg_free_table(sg);
+ devm_kfree(channel->dev, sg);
+ for (i = 0; i < alloc_pages; i++)
+ put_page(cache_pages[i]);
+ devm_kfree(channel->dev, cache_pages);
+
+ return (ssize_t)err;
+
+free_transaction:
+err_out_no_prep_sg_async_desc:
+err_out_no_slave_sg_async_descriptor:
+ devm_kfree(channel->dev, cmpl_ptr);
+ devm_kfree(channel->dev, txd);
+err_out_cmpl_ptr:
+ dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+err_out_dma_map_sg:
+ sg_free_table(sg);
+err_out_sg_to_sgl:
+ devm_kfree(channel->dev, sg);
+err_out_alloc_sg_table:
+ for (i = 0; i < alloc_pages; i++)
+ put_page(cache_pages[i]);
+err_out_pin_pages:
+ devm_kfree(channel->dev, cache_pages);
+err_out_cachepages_alloc:
+
+ return (ssize_t)err;
+}
+
+static ssize_t
+ps_pcie_dma_read(struct file *file,
+ char __user *buffer,
+ size_t length,
+ loff_t *f_offset)
+{
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ ssize_t ret;
+
+ chan = file->private_data;
+
+ if (chan->direction != DMA_FROM_DEVICE) {
+ dev_err(chan->dev, "Invalid data direction for channel\n");
+ ret = -EINVAL;
+ goto c2s_err_direction;
+ }
+
+ ret = initiate_sync_transfer(chan, buffer, length, f_offset,
+ DMA_FROM_DEVICE);
+
+ if (ret != length)
+ dev_dbg(chan->dev, "Read synchronous transfer unsuccessful\n");
+
+c2s_err_direction:
+ return ret;
+}
+
+static ssize_t
+ps_pcie_dma_write(struct file *file,
+ const char __user *buffer,
+ size_t length,
+ loff_t *f_offset)
+{
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ ssize_t ret;
+
+ chan = file->private_data;
+
+ if (chan->direction != DMA_TO_DEVICE) {
+ dev_err(chan->dev,
+ "Invalid data direction for channel\n");
+ ret = -EINVAL;
+ goto s2c_err_direction;
+ }
+
+ ret = initiate_sync_transfer(chan, buffer, length, f_offset,
+ DMA_TO_DEVICE);
+
+ if (ret != length)
+ dev_dbg(chan->dev, "Write synchronous transfer unsuccessful\n");
+
+s2c_err_direction:
+ return ret;
+}
+
+static int ps_pcie_dma_open(struct inode *in, struct file *file)
+{
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ int minor_num = iminor(in);
+
+ xdev = container_of(in->i_cdev,
+ struct xlnx_ps_pcie_dma_client_device,
+ xps_pcie_chardev);
+
+ file->private_data = &xdev->pcie_dma_chan[minor_num];
+
+ return 0;
+}
+
+static int ps_pcie_dma_release(struct inode *in, struct file *filp)
+{
+ return 0;
+}
+
+static int update_completed_info(struct xlnx_ps_pcie_dma_client_channel *chan,
+ struct usrbuff_info *usr_buff)
+{
+ int retval = 0;
+ unsigned int expected, count = 0;
+ struct xlnx_completed_info *entry;
+ struct xlnx_completed_info *next;
+
+ if (list_empty(&chan->completed.clist))
+ goto update_expected;
+
+ if (copy_from_user((void *)&expected,
+ (void __user *)&usr_buff->expected,
+ sizeof(unsigned int)) != 0) {
+ pr_err("Expected count copy failure\n");
+ retval = -ENXIO;
+ return retval;
+ }
+
+ if (expected > MAX_LIST) {
+ retval = -ENXIO;
+ return retval;
+ }
+
+ list_for_each_entry_safe(entry, next, &chan->completed.clist, clist) {
+ if (copy_to_user((void __user *)(usr_buff->buff_list + count),
+ (void *)&entry->buffer,
+ sizeof(struct buff_info)) != 0) {
+ pr_err("update user completed count copy failed\n");
+ retval = -ENXIO;
+ break;
+ }
+ count++;
+ spin_lock(&chan->channel_lock);
+ list_del(&entry->clist);
+ spin_unlock(&chan->channel_lock);
+ devm_kfree(chan->dev, entry);
+ if (count == expected)
+ break;
+ }
+
+update_expected:
+ if (copy_to_user((void __user *)&usr_buff->expected, (void *)&count,
+ (sizeof(unsigned int))) != 0) {
+ pr_err("update user expected count copy failure\n");
+ retval = -ENXIO;
+ }
+
+ return retval;
+}
+
+/**
+ * ps_pcie_dma_async_transfer_cbk - Callback handler for Asynchronous transfers.
+ * Handles both S2C and C2S transfer callbacks. Stores transaction information
+ * in a list for a user application to poll for this information
+ *
+ * @data: Callback parameter
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_async_transfer_cbk(void *data)
+{
+ struct xlnx_ps_pcie_dma_asynchronous_transaction *trans =
+ (struct xlnx_ps_pcie_dma_asynchronous_transaction *)data;
+ enum dma_status status;
+ struct dma_tx_state state;
+ unsigned int i;
+
+ dma_unmap_sg(trans->chan->dev, trans->sg->sgl, trans->sg->nents,
+ trans->chan->direction);
+ sg_free_table(trans->sg);
+ devm_kfree(trans->chan->dev, trans->sg);
+ devm_kfree(trans->chan->dev, trans->txd);
+ for (i = 0; i < trans->num_pages; i++)
+ put_page(trans->cache_pages[i]);
+ devm_kfree(trans->chan->dev, trans->cache_pages);
+
+ status = dmaengine_tx_status(trans->chan->chan, trans->cookie, &state);
+
+ if (status == DMA_COMPLETE)
+ trans->buffer_info->buffer.status = DMA_TRANSACTION_SUCCESSFUL;
+ else
+ trans->buffer_info->buffer.status = DMA_TRANSACTION_FAILURE;
+
+ spin_lock(&trans->chan->channel_lock);
+ list_add_tail(&trans->buffer_info->clist,
+ &trans->chan->completed.clist);
+ spin_unlock(&trans->chan->channel_lock);
+ devm_kfree(trans->chan->dev, trans);
+}
+
+/**
+ * initiate_async_transfer - Programs both Source Q
+ * and Destination Q of channel after setting up sg lists and transaction
+ * specific data. This function returns after setting up the transfer
+ *
+ * @channel: Pointer to the PS PCIe DMA channel structure
+ * @buffer: User land virtual address containing data to be sent or received
+ * @length: Length of user land buffer
+ * @f_offset: AXI domain address to/from which the data pointed to by the
+ * user buffer has to be sent/received
+ * @direction: Direction of the data transfer
+ *
+ * Return: Length of the transfer on success and error value on failure
+ */
+static int initiate_async_transfer(
+ struct xlnx_ps_pcie_dma_client_channel *channel,
+ char __user *buffer, size_t length, loff_t *f_offset,
+ enum dma_data_direction direction)
+{
+ int offset;
+ unsigned int alloc_pages;
+ unsigned long first, last, nents = 1;
+ struct page **cache_pages;
+ struct dma_chan *chan = NULL;
+ struct dma_device *device;
+ struct dma_async_tx_descriptor **txd = NULL;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags = 0;
+ struct xlnx_ps_pcie_dma_asynchronous_transaction *trans;
+ int err;
+ struct sg_table *sg;
+ enum dma_transfer_direction d_direction;
+ int i;
+ struct scatterlist *selem;
+ size_t elem_len = 0;
+
+ chan = channel->chan;
+ device = chan->device;
+
+ offset = offset_in_page(buffer);
+ first = ((unsigned long)buffer & PAGE_MASK) >> PAGE_SHIFT;
+ last = (((unsigned long)buffer + length - 1) & PAGE_MASK) >>
+ PAGE_SHIFT;
+ alloc_pages = (last - first) + 1;
+
+ cache_pages = devm_kzalloc(channel->dev,
+ (alloc_pages * (sizeof(struct page *))),
+ GFP_ATOMIC);
+ if (!cache_pages) {
+ err = -ENOMEM;
+ goto err_out_cachepages_alloc;
+ }
+
+ err = get_user_pages_fast((unsigned long)buffer, alloc_pages,
+ !(direction), cache_pages);
+ if (err <= 0) {
+ dev_err(channel->dev, "Unable to pin user pages\n");
+ if (err == 0)
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ } else if (err < alloc_pages) {
+ dev_err(channel->dev, "Pinned only %d of %u user pages\n",
+ err, alloc_pages);
+ for (i = 0; i < err; i++)
+ put_page(cache_pages[i]);
+ err = -EFAULT;
+ goto err_out_pin_pages;
+ }
+
+ sg = devm_kzalloc(channel->dev, sizeof(struct sg_table), GFP_ATOMIC);
+ if (!sg) {
+ err = -ENOMEM;
+ goto err_out_alloc_sg_table;
+ }
+
+ err = sg_alloc_table_from_pages(sg, cache_pages, alloc_pages, offset,
+ length, GFP_ATOMIC);
+ if (err < 0) {
+ dev_err(channel->dev, "Unable to create sg table\n");
+ goto err_out_sg_to_sgl;
+ }
+
+ err = dma_map_sg(channel->dev, sg->sgl, sg->nents, direction);
+ if (err == 0) {
+ dev_err(channel->dev,
+ "Unable to map user buffer to sg table\n");
+ err = -EIO;
+ goto err_out_dma_map_sg;
+ }
+
+ trans = devm_kzalloc(channel->dev, sizeof(*trans), GFP_ATOMIC);
+ if (!trans) {
+ err = -ENOMEM;
+ goto err_out_trans_ptr;
+ }
+
+ trans->buffer_info = devm_kzalloc(channel->dev,
+ sizeof(struct xlnx_completed_info),
+ GFP_ATOMIC);
+ if (!trans->buffer_info) {
+ err = -ENOMEM;
+ goto err_out_no_completion_info;
+ }
+
+ if (channel->mode == MEMORY_MAPPED)
+ nents = sg->nents;
+
+ txd = devm_kzalloc(channel->dev,
+ sizeof(*txd) * nents, GFP_ATOMIC);
+ if (!txd) {
+ err = -ENOMEM;
+ goto err_out_no_completion_info;
+ }
+
+ trans->txd = txd;
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0, selem = (sg->sgl); i < sg->nents; i++,
+ selem = sg_next(selem)) {
+ if ((i + 1) == sg->nents)
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+ if (direction == DMA_TO_DEVICE) {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ (dma_addr_t)(*f_offset) + elem_len,
+ selem->dma_address, selem->length,
+ flags);
+ } else {
+ txd[i] = device->device_prep_dma_memcpy(chan,
+ selem->dma_address,
+ (dma_addr_t)(*f_offset) + elem_len,
+ selem->length, flags);
+ }
+
+ elem_len += selem->length;
+
+ if (!txd[i]) {
+ err = -EAGAIN;
+ goto err_out_no_prep_sg_async_desc;
+ }
+ }
+ } else {
+ if (direction == DMA_TO_DEVICE)
+ d_direction = DMA_MEM_TO_DEV;
+ else
+ d_direction = DMA_DEV_TO_MEM;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ txd[0] = device->device_prep_slave_sg(chan, sg->sgl, sg->nents,
+ d_direction, flags, NULL);
+ if (!txd[0]) {
+ err = -EAGAIN;
+ goto err_out_no_slave_sg_async_descriptor;
+ }
+ }
+
+ trans->buffer_info->buffer.buff_address = buffer;
+ trans->buffer_info->buffer.buff_size = length;
+ trans->cache_pages = cache_pages;
+ trans->num_pages = alloc_pages;
+ trans->chan = channel;
+ trans->sg = sg;
+
+ if (channel->mode == MEMORY_MAPPED) {
+ for (i = 0; i < sg->nents; i++) {
+ cookie = txd[i]->tx_submit(txd[i]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+
+ if ((i + 1) == sg->nents) {
+ txd[i]->callback =
+ ps_pcie_dma_async_transfer_cbk;
+ txd[i]->callback_param = trans;
+ trans->cookie = cookie;
+ }
+ }
+
+ } else {
+ txd[0]->callback = ps_pcie_dma_async_transfer_cbk;
+ txd[0]->callback_param = trans;
+
+ cookie = txd[0]->tx_submit(txd[0]);
+ if (dma_submit_error(cookie)) {
+ err = (int)cookie;
+ dev_err(channel->dev,
+ "Unable to submit transaction\n");
+ goto free_transaction;
+ }
+
+ trans->cookie = cookie;
+ }
+
+ dma_async_issue_pending(chan);
+
+ return length;
+
+free_transaction:
+err_out_no_prep_sg_async_desc:
+err_out_no_slave_sg_async_descriptor:
+ devm_kfree(channel->dev, trans->buffer_info);
+ devm_kfree(channel->dev, txd);
+err_out_no_completion_info:
+ devm_kfree(channel->dev, trans);
+err_out_trans_ptr:
+ dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+err_out_dma_map_sg:
+ sg_free_table(sg);
+err_out_sg_to_sgl:
+ devm_kfree(channel->dev, sg);
+err_out_alloc_sg_table:
+ for (i = 0; i < alloc_pages; i++)
+ put_page(cache_pages[i]);
+err_out_pin_pages:
+ devm_kfree(channel->dev, cache_pages);
+err_out_cachepages_alloc:
+
+ return err;
+}
+
+static long ps_pcie_dma_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int retval = 0;
+ struct xlnx_ps_pcie_dma_client_channel *chan;
+ struct dma_transfer_info transfer_info;
+
+ if (_IOC_TYPE(cmd) != XPS_PCIE_DMA_CLIENT_MAGIC)
+ return -ENOTTY;
+
+ chan = filp->private_data;
+
+ switch (cmd) {
+ case ISET_ASYNC_TRANSFERINFO:
+ if (copy_from_user((void *)&transfer_info,
+ (void __user *)arg,
+ sizeof(struct dma_transfer_info)) != 0) {
+ pr_err("Copy from user asynchronous params\n");
+ retval = -ENXIO;
+ return retval;
+ }
+ if (transfer_info.direction != chan->direction) {
+ retval = -EINVAL;
+ return retval;
+ }
+ retval = initiate_async_transfer(chan,
+ transfer_info.buff_address,
+ transfer_info.buff_size,
+ &transfer_info.offset,
+ transfer_info.direction);
+ break;
+ case IGET_ASYNC_TRANSFERINFO:
+ retval = update_completed_info(chan,
+ (struct usrbuff_info *)arg);
+ break;
+ default:
+ pr_err("Unsupported ioctl command received\n");
+ retval = -ENOTTY;
+ }
+
+ return (long)retval;
+}
+
+static const struct file_operations ps_pcie_dma_comm_fops = {
+ .owner = THIS_MODULE,
+ .read = ps_pcie_dma_read,
+ .write = ps_pcie_dma_write,
+ .unlocked_ioctl = ps_pcie_dma_ioctl,
+ .open = ps_pcie_dma_open,
+ .release = ps_pcie_dma_release,
+};
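+
+/*
+ * Usage sketch (user space, illustrative only; the node name follows the
+ * "%s%d_%d" pattern passed to device_create() below): synchronous
+ * transfers map onto read()/write(), asynchronous ones onto the ioctls
+ * above:
+ *
+ * struct dma_transfer_info info = {
+ * .buff_address = buf,
+ * .buff_size = len,
+ * .offset = axi_addr,
+ * .direction = DMA_TO_DEVICE,
+ * };
+ * int fd = open("/dev/ps_pcie_dmachan0_0", O_RDWR);
+ * ioctl(fd, ISET_ASYNC_TRANSFERINFO, &info);
+ *
+ * Completions are then polled with IGET_ASYNC_TRANSFERINFO on a
+ * struct usrbuff_info whose .expected field holds the number of entries
+ * the caller can accept.
+ */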
+
+static void pio_sw_intr_cbk(void *data)
+{
+ struct completion *compl = (struct completion *)data;
+
+ if (compl)
+ complete(compl);
+}
+
+static long pio_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ char *bar_memory = NULL;
+ u32 translation_size = 0;
+ long err = 0;
+ struct dma_async_tx_descriptor *intr_txd = NULL;
+ dma_cookie_t cookie;
+ struct dma_chan *chan = NULL;
+ struct dma_device *device;
+ enum dma_ctrl_flags flags;
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+ struct BAR_PARAMS *barinfo;
+
+ xdev = filp->private_data;
+ chan = xdev->pcie_dma_chan[0].chan;
+ device = chan->device;
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ xlnx_match =
+ (struct ps_pcie_dma_channel_match *)chan->private;
+
+ barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+ DMA_BAR_NUMBER);
+ bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+ switch (cmd) {
+ case IOCTL_EP_CHECK_TRANSLATION:
+
+ mutex_lock(&xdev->pio_chardev_mutex);
+ reinit_completion(&xdev->trans_cmpltn);
+
+ intr_txd = device->device_prep_dma_interrupt(chan, flags);
+ if (!intr_txd) {
+ err = -EAGAIN;
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ return err;
+ }
+
+ intr_txd->callback = pio_sw_intr_cbk;
+ intr_txd->callback_param = &xdev->trans_cmpltn;
+
+ cookie = intr_txd->tx_submit(intr_txd);
+ if (dma_submit_error(cookie)) {
+ err = cookie;
+ pr_err("Unable to submit interrupt transaction\n");
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ return err;
+ }
+
+ dma_async_issue_pending(chan);
+
+ iowrite32(EP_TRANSLATION_CHECK, (void __iomem *)(bar_memory +
+ DMA_SCRATCH0_REG_OFFSET));
+ iowrite32(DMA_SW_INTR_ASSRT_BIT, (void __iomem *)(bar_memory +
+ DMA_AXI_INTR_ASSRT_REG_OFFSET));
+
+ wait_for_completion_interruptible(&xdev->trans_cmpltn);
+ translation_size = ioread32((void __iomem *)(bar_memory +
+ DMA_SCRATCH1_REG_OFFSET));
+ if (translation_size > 0)
+ xdev->pio_translation_size = translation_size;
+ else
+ err = -EAGAIN;
+ iowrite32(0, (void __iomem *)(bar_memory +
+ DMA_SCRATCH1_REG_OFFSET));
+ mutex_unlock(&xdev->pio_chardev_mutex);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static ssize_t
+pio_read(struct file *file, char __user *buffer, size_t length,
+ loff_t *f_offset)
+{
+ char *bar_memory = NULL;
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+ ssize_t num_bytes = 0;
+ struct BAR_PARAMS *barinfo;
+
+ xdev = file->private_data;
+ xlnx_match = (struct ps_pcie_dma_channel_match *)
+ xdev->pcie_dma_chan[0].chan->private;
+
+ barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+ PIO_MEMORY_BAR_NUMBER);
+ bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+ if (length > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer length supplied at PIO read\n");
+ return -EINVAL;
+ }
+
+ if ((length + *f_offset) > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer offset supplied at PIO read\n");
+ return -EINVAL;
+ }
+
+ bar_memory += *f_offset;
+
+ num_bytes = copy_to_user(buffer, bar_memory, length);
+ if (num_bytes != 0) {
+ pr_err("Error! copy_to_user failed at PIO read\n");
+ num_bytes = length - num_bytes;
+ } else {
+ num_bytes = length;
+ }
+
+ return num_bytes;
+}
+
+static ssize_t
+pio_write(struct file *file, const char __user *buffer,
+ size_t length, loff_t *f_offset)
+{
+ char *bar_memory = NULL;
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+ ssize_t num_bytes = 0;
+ struct BAR_PARAMS *barinfo;
+
+ xdev = file->private_data;
+ xlnx_match = (struct ps_pcie_dma_channel_match *)
+ xdev->pcie_dma_chan[0].chan->private;
+
+ barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+ PIO_MEMORY_BAR_NUMBER);
+ bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+ if (length > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer length supplied at PIO write\n");
+ return -EINVAL;
+ }
+
+ if ((length + *f_offset) > xdev->pio_translation_size) {
+ pr_err("Error! Invalid buffer offset supplied at PIO write\n");
+ return -EINVAL;
+ }
+
+ bar_memory += *f_offset;
+
+ num_bytes = copy_from_user(bar_memory, buffer, length);
+
+ if (num_bytes != 0) {
+ pr_err("Error! copy_from_user failed at PIO write\n");
+ num_bytes = length - num_bytes;
+ } else {
+ num_bytes = length;
+ }
+
+ return num_bytes;
+}
+
+static int pio_open(struct inode *in, struct file *file)
+{
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+
+ xdev = container_of(in->i_cdev,
+ struct xlnx_ps_pcie_dma_client_device,
+ xpio_char_dev);
+
+ file->private_data = xdev;
+
+ return 0;
+}
+
+static int pio_release(struct inode *in, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations ps_pcie_pio_fops = {
+ .owner = THIS_MODULE,
+ .read = pio_read,
+ .write = pio_write,
+ .unlocked_ioctl = pio_ioctl,
+ .open = pio_open,
+ .release = pio_release,
+};
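+
+/*
+ * Usage sketch (user space, illustrative only): the translated window
+ * size must be learned via IOCTL_EP_CHECK_TRANSLATION before PIO
+ * transfers, since reads/writes beyond that window are rejected above:
+ *
+ * int fd = open("/dev/ps_pcie_pio_0", O_RDWR);
+ * ioctl(fd, IOCTL_EP_CHECK_TRANSLATION);
+ * pwrite(fd, buf, len, 0);
+ */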
+
+static void destroy_char_iface_for_pio(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ device_destroy(g_ps_pcie_dma_client_class,
+ MKDEV(MAJOR(xdev->pio_char_device), 0));
+ cdev_del(&xdev->xpio_char_dev);
+ unregister_chrdev_region(xdev->pio_char_device, 1);
+}
+
+static void destroy_char_iface_for_dma(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int i;
+ struct xlnx_completed_info *entry, *next;
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+ list_for_each_entry_safe(entry, next,
+ &xdev->pcie_dma_chan[i].completed.clist,
+ clist) {
+ spin_lock(&xdev->pcie_dma_chan[i].channel_lock);
+ list_del(&entry->clist);
+ spin_unlock(&xdev->pcie_dma_chan[i].channel_lock);
+ kfree(entry);
+ }
+ device_destroy(g_ps_pcie_dma_client_class,
+ MKDEV(MAJOR(xdev->char_device), i));
+ }
+ cdev_del(&xdev->xps_pcie_chardev);
+ unregister_chrdev_region(xdev->char_device, MAX_ALLOWED_CHANNELS_IN_HW);
+}
+
+static void delete_char_dev_interfaces(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ destroy_char_iface_for_dma(xdev);
+ if (xdev->properties->pio_transfers == PIO_SUPPORTED)
+ destroy_char_iface_for_pio(xdev);
+}
+
+static void release_dma_channels(struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int i;
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++)
+ dma_release_channel(xdev->pcie_dma_chan[i].chan);
+}
+
+static void delete_char_devices(void)
+{
+ struct xlnx_ps_pcie_dma_client_device *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &g_ps_pcie_dma_client_list,
+ dev_node) {
+ list_del(&entry->dev_node);
+ delete_char_dev_interfaces(entry);
+ release_dma_channels(entry);
+ kfree(entry);
+ }
+}
+
+static bool ps_pcie_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct ps_pcie_dma_channel_match *client_match =
+ (struct ps_pcie_dma_channel_match *)param;
+
+ struct ps_pcie_dma_channel_match *dma_channel_match =
+ (struct ps_pcie_dma_channel_match *)chan->private;
+
+ if (client_match && dma_channel_match) {
+ if (client_match->pci_vendorid != 0 &&
+ dma_channel_match->pci_vendorid != 0) {
+ if (client_match->pci_vendorid == dma_channel_match->pci_vendorid) {
+ if (client_match->pci_deviceid == dma_channel_match->pci_deviceid &&
+ client_match->channel_number == dma_channel_match->channel_number &&
+ client_match->direction == dma_channel_match->direction) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+static int acquire_dma_channels(struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err;
+ int i;
+ dma_cap_mask_t mask;
+ struct ps_pcie_dma_channel_match *match;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+ match = &xdev->pcie_dma_chan[i].match;
+ match->board_number = xdev->properties->board_number;
+ match->pci_deviceid = xdev->properties->pci_deviceid;
+ match->pci_vendorid = xdev->properties->pci_vendorid;
+ match->channel_number = i;
+ match->direction = xdev->properties->direction[i];
+
+ xdev->pcie_dma_chan[i].chan =
+ dma_request_channel(mask, ps_pcie_dma_filter, match);
+
+ if (!xdev->pcie_dma_chan[i].chan) {
+ pr_err("Error channel handle %d board %d channel\n",
+ match->board_number,
+ match->channel_number);
+ err = -EINVAL;
+ goto err_out_no_channels;
+ }
+ xdev->pcie_dma_chan[i].dev =
+ xdev->pcie_dma_chan[i].chan->device->dev;
+ xdev->pcie_dma_chan[i].direction =
+ xdev->properties->direction[i];
+ xdev->pcie_dma_chan[i].mode =
+ xdev->properties->mode;
+ INIT_LIST_HEAD(&xdev->pcie_dma_chan[i].completed.clist);
+ spin_lock_init(&xdev->pcie_dma_chan[i].channel_lock);
+ }
+
+ return 0;
+
+err_out_no_channels:
+ while (i > 0) {
+ i--;
+ dma_release_channel(xdev->pcie_dma_chan[i].chan);
+ }
+ return err;
+}
+
+static int create_char_dev_iface_for_dma_device(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err = 0;
+ int i;
+
+ WARN_ON(!xdev);
+
+ err = alloc_chrdev_region(&xdev->char_device, 0,
+ MAX_ALLOWED_CHANNELS_IN_HW,
+ CHAR_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("Unable to allocate char device region\n");
+ return err;
+ }
+
+ xdev->xps_pcie_chardev.owner = THIS_MODULE;
+ cdev_init(&xdev->xps_pcie_chardev, &ps_pcie_dma_comm_fops);
+ xdev->xps_pcie_chardev.dev = xdev->char_device;
+
+ err = cdev_add(&xdev->xps_pcie_chardev, xdev->char_device,
+ MAX_ALLOWED_CHANNELS_IN_HW);
+ if (err < 0) {
+ pr_err("PS PCIe DMA unable to add cdev\n");
+ goto err_out_cdev_add;
+ }
+
+ for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+ xdev->chardev[i] =
+ device_create(g_ps_pcie_dma_client_class,
+ xdev->pcie_dma_chan[i].dev,
+ MKDEV(MAJOR(xdev->char_device), i),
+ xdev,
+ "%s%d_%d", CHAR_DRIVER_NAME,
+ i, xdev->properties->board_number);
+
+ if (IS_ERR(xdev->chardev[i])) {
+ err = PTR_ERR(xdev->chardev[i]);
+ pr_err("PS PCIe DMA Unable to create device %d\n", i);
+ goto err_out_dev_create;
+ }
+ }
+
+ return 0;
+
+err_out_dev_create:
+ while (--i >= 0) {
+ device_destroy(g_ps_pcie_dma_client_class,
+ MKDEV(MAJOR(xdev->char_device), i));
+ }
+ cdev_del(&xdev->xps_pcie_chardev);
+err_out_cdev_add:
+ unregister_chrdev_region(xdev->char_device, MAX_ALLOWED_CHANNELS_IN_HW);
+ return err;
+}
+
+static int create_char_dev_iface_for_pio(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err;
+
+ err = alloc_chrdev_region(&xdev->pio_char_device, 0, 1,
+ PIO_CHAR_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("Unable to allocate pio character device region\n");
+ return err;
+ }
+
+ xdev->xpio_char_dev.owner = THIS_MODULE;
+ cdev_init(&xdev->xpio_char_dev, &ps_pcie_pio_fops);
+ xdev->xpio_char_dev.dev = xdev->pio_char_device;
+
+ err = cdev_add(&xdev->xpio_char_dev, xdev->pio_char_device, 1);
+ if (err < 0) {
+ pr_err("PS PCIe DMA unable to add cdev for pio\n");
+ goto err_out_pio_cdev_add;
+ }
+
+ /* Initialize state consumed by the pio ioctl before the device
+ * node becomes visible
+ */
+ mutex_init(&xdev->pio_chardev_mutex);
+ xdev->pio_translation_size = 0;
+ init_completion(&xdev->trans_cmpltn);
+
+ xdev->xpio_char_device =
+ device_create(g_ps_pcie_dma_client_class,
+ xdev->pcie_dma_chan[0].dev,
+ MKDEV(MAJOR(xdev->pio_char_device), 0),
+ xdev, "%s_%d", PIO_CHAR_DRIVER_NAME,
+ xdev->properties->board_number);
+
+ if (IS_ERR(xdev->xpio_char_device)) {
+ err = PTR_ERR(xdev->xpio_char_device);
+ pr_err("PS PCIe DMA Unable to create pio device\n");
+ goto err_out_pio_dev_create;
+ }
+
+ return 0;
+
+err_out_pio_dev_create:
+ cdev_del(&xdev->xpio_char_dev);
+err_out_pio_cdev_add:
+ unregister_chrdev_region(xdev->pio_char_device, 1);
+ return err;
+}
+
+static int create_char_dev_interfaces(
+ struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+ int err;
+
+ err = create_char_dev_iface_for_dma_device(xdev);
+
+ if (err != 0) {
+ pr_err("Unable to create char dev dma iface %d\n",
+ xdev->properties->pci_deviceid);
+ goto no_char_iface_for_dma;
+ }
+
+ if (xdev->properties->pio_transfers == PIO_SUPPORTED) {
+ err = create_char_dev_iface_for_pio(xdev);
+ if (err != 0) {
+ pr_err("Unable to create char dev pio iface %d\n",
+ xdev->properties->pci_deviceid);
+ goto no_char_iface_for_pio;
+ }
+ }
+
+ return 0;
+
+no_char_iface_for_pio:
+ destroy_char_iface_for_dma(xdev);
+no_char_iface_for_dma:
+ return err;
+}
+
+static int setup_char_devices(u16 dev_prop_index)
+{
+ struct xlnx_ps_pcie_dma_client_device *xdev;
+ int err;
+
+ xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+ if (!xdev) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ xdev->properties = &g_dma_deviceproperties_list[dev_prop_index];
+
+ err = acquire_dma_channels(xdev);
+ if (err != 0) {
+ pr_err("Unable to acquire dma channels %d\n",
+ dev_prop_index);
+ goto err_no_dma_channels;
+ }
+
+ err = create_char_dev_interfaces(xdev);
+ if (err != 0) {
+ pr_err("Unable to create char dev interfaces %d\n",
+ dev_prop_index);
+ goto err_no_char_dev_ifaces;
+ }
+
+ list_add_tail(&xdev->dev_node, &g_ps_pcie_dma_client_list);
+
+ return 0;
+
+err_no_char_dev_ifaces:
+ release_dma_channels(xdev);
+err_no_dma_channels:
+ kfree(xdev);
+ return err;
+}
+
+/**
+ * ps_pcie_dma_client_init - Driver init function
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_dma_client_init(void)
+{
+ int err;
+ int i;
+ size_t num_dma_dev_properties;
+
+ INIT_LIST_HEAD(&g_ps_pcie_dma_client_list);
+
+ g_ps_pcie_dma_client_class = class_create(THIS_MODULE, DRV_MODULE_NAME);
+ if (IS_ERR(g_ps_pcie_dma_client_class)) {
+ pr_err("%s failed to create class\n", DRV_MODULE_NAME);
+ return PTR_ERR(g_ps_pcie_dma_client_class);
+ }
+
+ num_dma_dev_properties = ARRAY_SIZE(g_dma_deviceproperties_list);
+ for (i = 0; i < num_dma_dev_properties; i++) {
+ err = setup_char_devices(i);
+ if (err) {
+ pr_err("Error creating char devices for %d\n", i);
+ goto err_no_char_devices;
+ }
+ }
+
+ pr_info("PS PCIe DMA Client Driver Init successful\n");
+ return 0;
+
+err_no_char_devices:
+ delete_char_devices();
+
+ if (g_ps_pcie_dma_client_class)
+ class_destroy(g_ps_pcie_dma_client_class);
+ return err;
+}
+late_initcall(ps_pcie_dma_client_init);
+
+/**
+ * ps_pcie_dma_client_exit - Driver exit function
+ *
+ */
+static void __exit ps_pcie_dma_client_exit(void)
+{
+ delete_char_devices();
+
+ if (g_ps_pcie_dma_client_class)
+ class_destroy(g_ps_pcie_dma_client_class);
+}
+
+module_exit(ps_pcie_dma_client_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA client Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_main.c b/drivers/dma/xilinx/xilinx_ps_pcie_main.c
new file mode 100644
index 000000000000..cb3151219083
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_main.c
@@ -0,0 +1,200 @@
+/*
+ * XILINX PS PCIe driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
+ * on ZynqMP UltraScale+ Devices.
+ * This PCIe driver creates a platform device with specific platform
+ * info, enabling creation of a DMA device corresponding to the channel
+ * information provided in the properties
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "xilinx_ps_pcie.h"
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma"
+
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void ps_pcie_dma_remove(struct pci_dev *pdev);
+
+static u32 channel_properties_pcie_axi[] = {
+ (u32)(PCIE_AXI_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+ (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+ (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static u32 channel_properties_axi_pcie[] = {
+ (u32)(AXI_PCIE_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+ (u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+ (u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static struct property_entry generic_pcie_ep_property[] = {
+ PROPERTY_ENTRY_U32("numchannels", (u32)MAX_NUMBER_OF_CHANNELS),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
+ channel_properties_pcie_axi),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
+ channel_properties_axi_pcie),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel2",
+ channel_properties_pcie_axi),
+ PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel3",
+ channel_properties_axi_pcie),
+ { },
+};
+
+static const struct platform_device_info xlnx_std_platform_dev_info = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .properties = generic_pcie_ep_property,
+};
+
+/**
+ * ps_pcie_dma_probe - Driver probe function
+ * @pdev: Pointer to the pci_dev structure
+ * @ent: pci device id
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err;
+ struct platform_device *platform_dev;
+ struct platform_device_info platform_dev_info;
+
+ dev_info(&pdev->dev, "PS PCIe DMA Driver probe\n");
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+ return err;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&pdev->dev, "Cannot set 64 bit DMA mask\n");
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&pdev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ /* For Root DMA platform device will be created through device tree */
+ if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
+ pdev->device == ZYNQMP_RC_DMA_DEVID)
+ return 0;
+
+ memcpy(&platform_dev_info, &xlnx_std_platform_dev_info,
+ sizeof(xlnx_std_platform_dev_info));
+
+ /* Do device specific channel configuration changes to
+ * platform_dev_info.properties if required.
+ * More information on channel properties can be found
+ * at Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
+ */
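+ /* Illustrative example only: the arrays follow the layout
+ * direction, descriptor count, queue count, coalesce count,
+ * poll timer frequency; e.g. interrupt coalescing on the
+ * PCIe->AXI channels could be raised here with
+ *
+ * channel_properties_pcie_axi[3] = 8;
+ */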
+
+ platform_dev_info.parent = &pdev->dev;
+ platform_dev_info.data = &pdev;
+ platform_dev_info.size_data = sizeof(struct pci_dev **);
+
+ platform_dev = platform_device_register_full(&platform_dev_info);
+ if (IS_ERR(platform_dev)) {
+ dev_err(&pdev->dev,
+ "Cannot create platform device, aborting\n");
+ return PTR_ERR(platform_dev);
+ }
+
+ pci_set_drvdata(pdev, platform_dev);
+
+ dev_info(&pdev->dev, "PS PCIe DMA driver successfully probed\n");
+
+ return 0;
+}
+
+static struct pci_device_id ps_pcie_dma_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_DMA_DEVID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_RC_DMA_DEVID) },
+ { }
+};
+
+static struct pci_driver ps_pcie_dma_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = ps_pcie_dma_tbl,
+ .probe = ps_pcie_dma_probe,
+ .remove = ps_pcie_dma_remove,
+};
+
+/**
+ * ps_pcie_init - Driver init function
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_init(void)
+{
+ int ret;
+
+ pr_info("%s init()\n", DRV_MODULE_NAME);
+
+ ret = pci_register_driver(&ps_pcie_dma_driver);
+ if (ret)
+ return ret;
+
+ ret = dma_platform_driver_register();
+ if (ret)
+ pci_unregister_driver(&ps_pcie_dma_driver);
+
+ return ret;
+}
+
+/**
+ * ps_pcie_dma_remove - Driver remove function
+ * @pdev: Pointer to the pci_dev structure
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_remove(struct pci_dev *pdev)
+{
+ struct platform_device *platform_dev;
+
+ platform_dev = (struct platform_device *)pci_get_drvdata(pdev);
+
+ if (platform_dev)
+ platform_device_unregister(platform_dev);
+}
+
+/**
+ * ps_pcie_exit - Driver exit function
+ *
+ * Return: void
+ */
+static void __exit ps_pcie_exit(void)
+{
+ pr_info("%s exit()\n", DRV_MODULE_NAME);
+
+ dma_platform_driver_unregister();
+ pci_unregister_driver(&ps_pcie_dma_driver);
+}
+
+module_init(ps_pcie_init);
+module_exit(ps_pcie_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_platform.c b/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
new file mode 100644
index 000000000000..7599c3a9d300
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
@@ -0,0 +1,3170 @@
+/*
+ * XILINX PS PCIe DMA driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
+ * on ZynqMP UltraScale+ Devices
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "xilinx_ps_pcie.h"
+#include "../dmaengine.h"
+
+#define PLATFORM_DRIVER_NAME "ps_pcie_pform_dma"
+#define MAX_BARS 6
+
+#define DMA_BAR_NUMBER 0
+
+#define MIN_SW_INTR_TRANSACTIONS 2
+
+#define CHANNEL_PROPERTY_LENGTH 50
+#define WORKQ_NAME_SIZE 100
+#define INTR_HANDLR_NAME_SIZE 100
+
+#define PS_PCIE_DMA_IRQ_NOSHARE 0
+
+#define MAX_COALESCE_COUNT 255
+
+#define DMA_CHANNEL_REGS_SIZE 0x80
+
+#define DMA_SRCQPTRLO_REG_OFFSET (0x00) /* Source Q pointer Lo */
+#define DMA_SRCQPTRHI_REG_OFFSET (0x04) /* Source Q pointer Hi */
+#define DMA_SRCQSZ_REG_OFFSET (0x08) /* Source Q size */
+#define DMA_SRCQLMT_REG_OFFSET (0x0C) /* Source Q limit */
+#define DMA_DSTQPTRLO_REG_OFFSET (0x10) /* Destination Q pointer Lo */
+#define DMA_DSTQPTRHI_REG_OFFSET (0x14) /* Destination Q pointer Hi */
+#define DMA_DSTQSZ_REG_OFFSET (0x18) /* Destination Q size */
+#define DMA_DSTQLMT_REG_OFFSET (0x1C) /* Destination Q limit */
+#define DMA_SSTAQPTRLO_REG_OFFSET (0x20) /* Source Status Q pointer Lo */
+#define DMA_SSTAQPTRHI_REG_OFFSET (0x24) /* Source Status Q pointer Hi */
+#define DMA_SSTAQSZ_REG_OFFSET (0x28) /* Source Status Q size */
+#define DMA_SSTAQLMT_REG_OFFSET (0x2C) /* Source Status Q limit */
+#define DMA_DSTAQPTRLO_REG_OFFSET (0x30) /* Destination Status Q pointer Lo */
+#define DMA_DSTAQPTRHI_REG_OFFSET (0x34) /* Destination Status Q pointer Hi */
+#define DMA_DSTAQSZ_REG_OFFSET (0x38) /* Destination Status Q size */
+#define DMA_DSTAQLMT_REG_OFFSET (0x3C) /* Destination Status Q limit */
+#define DMA_SRCQNXT_REG_OFFSET (0x40) /* Source Q next */
+#define DMA_DSTQNXT_REG_OFFSET (0x44) /* Destination Q next */
+#define DMA_SSTAQNXT_REG_OFFSET (0x48) /* Source Status Q next */
+#define DMA_DSTAQNXT_REG_OFFSET (0x4C) /* Destination Status Q next */
+#define DMA_SCRATCH0_REG_OFFSET (0x50) /* Scratch pad register 0 */
+
+#define DMA_PCIE_INTR_CNTRL_REG_OFFSET (0x60) /* DMA PCIe intr control reg */
+#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64) /* DMA PCIe intr status reg */
+#define DMA_AXI_INTR_CNTRL_REG_OFFSET (0x68) /* DMA AXI intr control reg */
+#define DMA_AXI_INTR_STATUS_REG_OFFSET (0x6C) /* DMA AXI intr status reg */
+#define DMA_PCIE_INTR_ASSRT_REG_OFFSET (0x70) /* PCIe intr assert reg */
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74) /* AXI intr assert register */
+#define DMA_CNTRL_REG_OFFSET (0x78) /* DMA control register */
+#define DMA_STATUS_REG_OFFSET (0x7C) /* DMA status register */
+
+#define DMA_CNTRL_RST_BIT BIT(1)
+#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
+#define DMA_CNTRL_ENABL_BIT BIT(0)
+#define DMA_STATUS_DMA_PRES_BIT BIT(15)
+#define DMA_STATUS_DMA_RUNNING_BIT BIT(0)
+#define DMA_QPTRLO_QLOCAXI_BIT BIT(0)
+#define DMA_QPTRLO_Q_ENABLE_BIT BIT(1)
+#define DMA_INTSTATUS_DMAERR_BIT BIT(1)
+#define DMA_INTSTATUS_SGLINTR_BIT BIT(2)
+#define DMA_INTSTATUS_SWINTR_BIT BIT(3)
+#define DMA_INTCNTRL_ENABLINTR_BIT BIT(0)
+#define DMA_INTCNTRL_DMAERRINTR_BIT BIT(1)
+#define DMA_INTCNTRL_DMASGINTR_BIT BIT(2)
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK GENMASK(23, 0)
+#define SOURCE_CONTROL_BD_LOC_AXI BIT(24)
+#define SOURCE_CONTROL_BD_EOP_BIT BIT(25)
+#define SOURCE_CONTROL_BD_INTR_BIT BIT(26)
+#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT BIT(25)
+#define SOURCE_CONTROL_ATTRIBUTES_MASK GENMASK(31, 28)
+#define SRC_CTL_ATTRIB_BIT_SHIFT (29)
+
+#define STA_BD_COMPLETED_BIT BIT(0)
+#define STA_BD_SOURCE_ERROR_BIT BIT(1)
+#define STA_BD_DESTINATION_ERROR_BIT BIT(2)
+#define STA_BD_INTERNAL_ERROR_BIT BIT(3)
+#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
+#define STA_BD_BYTE_COUNT_MASK GENMASK(30, 4)
+
+#define STA_BD_BYTE_COUNT_SHIFT 4
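+
+/*
+ * Illustrative decode of a status descriptor word using the masks above:
+ * a completed BD has STA_BD_COMPLETED_BIT set and carries the transferred
+ * length in bits 30:4, e.g.
+ *
+ * bytes = (status_flag_byte_count & STA_BD_BYTE_COUNT_MASK)
+ * >> STA_BD_BYTE_COUNT_SHIFT;
+ */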
+
+#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
+
+#define DMA_SRC_Q_LOW_BIT_SHIFT GENMASK(5, 0)
+
+#define MAX_TRANSFER_LENGTH 0x1000000
+
+#define AXI_ATTRIBUTE 0x3
+#define PCI_ATTRIBUTE 0x2
+
+#define ROOTDMA_Q_READ_ATTRIBUTE 0x8
+
+/*
+ * User Id programmed into Source Q will be copied into Status Q of Destination
+ */
+#define DEFAULT_UID 1
+
+/*
+ * DMA channel registers
+ */
+struct DMA_ENGINE_REGISTERS {
+ u32 src_q_low; /* 0x00 */
+ u32 src_q_high; /* 0x04 */
+ u32 src_q_size; /* 0x08 */
+ u32 src_q_limit; /* 0x0C */
+ u32 dst_q_low; /* 0x10 */
+ u32 dst_q_high; /* 0x14 */
+ u32 dst_q_size; /* 0x18 */
+ u32 dst_q_limit; /* 0x1c */
+ u32 stas_q_low; /* 0x20 */
+ u32 stas_q_high; /* 0x24 */
+ u32 stas_q_size; /* 0x28 */
+ u32 stas_q_limit; /* 0x2C */
+ u32 stad_q_low; /* 0x30 */
+ u32 stad_q_high; /* 0x34 */
+ u32 stad_q_size; /* 0x38 */
+ u32 stad_q_limit; /* 0x3C */
+ u32 src_q_next; /* 0x40 */
+ u32 dst_q_next; /* 0x44 */
+ u32 stas_q_next; /* 0x48 */
+ u32 stad_q_next; /* 0x4C */
+ u32 scrathc0; /* 0x50 */
+ u32 scrathc1; /* 0x54 */
+ u32 scrathc2; /* 0x58 */
+ u32 scrathc3; /* 0x5C */
+ u32 pcie_intr_cntrl; /* 0x60 */
+ u32 pcie_intr_status; /* 0x64 */
+ u32 axi_intr_cntrl; /* 0x68 */
+ u32 axi_intr_status; /* 0x6C */
+ u32 pcie_intr_assert; /* 0x70 */
+ u32 axi_intr_assert; /* 0x74 */
+ u32 dma_channel_ctrl; /* 0x78 */
+ u32 dma_channel_status; /* 0x7C */
+} __attribute__((__packed__));
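+
+/*
+ * Illustrative use of the register map above (a sketch only; chan_base is
+ * assumed to be an ioremapped pointer to these channel registers):
+ *
+ * struct DMA_ENGINE_REGISTERS __iomem *chan_base;
+ * u32 sts = ioread32(&chan_base->dma_channel_status);
+ *
+ * if (sts & DMA_STATUS_DMA_PRES_BIT)
+ * dma_present = true;
+ */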
+
+/**
+ * struct SOURCE_DMA_DESCRIPTOR - Source Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @user_id: User id gets copied to status q of destination
+ */
+struct SOURCE_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+/**
+ * struct DEST_DMA_DESCRIPTOR - Destination Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @reserved: Reserved field
+ */
+struct DEST_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 reserved;
+} __attribute__((__packed__));
+
+/**
+ * struct STATUS_DMA_DESCRIPTOR - Status Hardware Descriptor
+ * @status_flag_byte_count: Byte count/buffer length and status flags
+ * @user_handle: User handle gets copied from src/dstq on completion
+ * @user_id: User id gets copied from srcq
+ */
+struct STATUS_DMA_DESCRIPTOR {
+ u32 status_flag_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+enum PACKET_CONTEXT_AVAILABILITY {
+ FREE = 0, /* Packet transfer parameter context is free. */
+ IN_USE /* Packet transfer parameter context is in use. */
+};
+
+struct ps_pcie_transfer_elements {
+ struct list_head node;
+ dma_addr_t src_pa;
+ dma_addr_t dst_pa;
+ u32 transfer_bytes;
+};
+
+struct ps_pcie_tx_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head transfer_nodes;
+ u32 src_elements;
+ u32 dst_elements;
+ u32 total_transfer_bytes;
+};
+
+struct ps_pcie_intr_segment {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_intr_tx;
+};
+
+/*
+ * The context structure stored for each DMA transaction
+ * This structure is maintained separately for Src Q and Destination Q
+ * @availability_status: Indicates whether packet context is available
+ * @idx_sop: Indicates starting index of buffer descriptor for a transfer
+ * @idx_eop: Indicates ending index of buffer descriptor for a transfer
+ * @seg: Transaction segment associated with this context
+ */
+struct PACKET_TRANSFER_PARAMS {
+ enum PACKET_CONTEXT_AVAILABILITY availability_status;
+ u16 idx_sop;
+ u16 idx_eop;
+ struct ps_pcie_tx_segment *seg;
+};
+
+enum CHANNEL_STATE {
+ CHANNEL_RESOURCE_UNALLOCATED = 0, /* Channel resources not allocated */
+ CHANNEL_UNAVIALBLE, /* Channel inactive */
+ CHANNEL_AVAILABLE, /* Channel available for transfers */
+ CHANNEL_ERROR /* Channel encountered errors */
+};
+
+enum BUFFER_LOCATION {
+ BUFFER_LOC_PCI = 0,
+ BUFFER_LOC_AXI,
+ BUFFER_LOC_INVALID
+};
+
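+/*
+ * Order of per-channel property values in the "ps_pcie_channel%d" u32
+ * array parsed by probe_channel_properties(); the property values must
+ * be listed in this order.
+ */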
+enum dev_channel_properties {
+ DMA_CHANNEL_DIRECTION = 0,
+ NUM_DESCRIPTORS,
+ NUM_QUEUES,
+ COALESCE_COUNT,
+ POLL_TIMER_FREQUENCY
+};
+
+/**
+ * struct ps_pcie_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @dev: The dma device
+ * @common: DMA common channel
+ * @chan_base: Pointer to Channel registers
+ * @channel_number: DMA channel number in the device
+ * @num_queues: Number of queues per channel.
+ * It should be four for memory mapped case and
+ * two for Streaming case
+ * @direction: Transfer direction
+ * @srcq_buffer_location: Indicates whether src Q buffers reside in PCI or AXI memory
+ * @dstq_buffer_location: Indicates whether dst Q buffers reside in PCI or AXI memory
+ * @total_descriptors: Number of descriptors allocated per queue
+ * @state: Indicates channel state
+ * @channel_lock: Spin lock to be used before changing channel state
+ * @cookie_lock: Spin lock to be used before assigning cookie for a transaction
+ * @coalesce_count: Indicates number of packet transfers before interrupts
+ * @poll_timer_freq: Indicates frequency of polling for completed transactions
+ * @poll_timer: Timer to poll dma buffer descriptors if coalesce count is > 0
+ * @src_avail_descriptors: Available sgl source descriptors
+ * @src_desc_lock: Lock for synchronizing src_avail_descriptors
+ * @dst_avail_descriptors: Available sgl destination descriptors
+ * @dst_desc_lock: Lock for synchronizing dst_avail_descriptors
+ * @src_sgl_bd_pa: Physical address of Source SGL buffer Descriptors
+ * @psrc_sgl_bd: Virtual address of Source SGL buffer Descriptors
+ * @src_sgl_freeidx: Holds index of Source SGL buffer descriptor to be filled
+ * @dst_sgl_bd_pa: Physical address of Dst SGL buffer Descriptors
+ * @pdst_sgl_bd: Virtual address of Dst SGL buffer Descriptors
+ * @dst_sgl_freeidx: Holds index of Destination SGL buffer descriptor to be filled
+ * @src_sta_bd_pa: Physical address of StatusQ buffer Descriptors
+ * @psrc_sta_bd: Virtual address of Src StatusQ buffer Descriptors
+ * @src_staprobe_idx: Holds index of Status Q to be examined for SrcQ updates
+ * @src_sta_hw_probe_idx: Holds index of maximum limit of Status Q for hardware
+ * @dst_sta_bd_pa: Physical address of Dst StatusQ buffer Descriptor
+ * @pdst_sta_bd: Virtual address of Dst Status Q buffer Descriptors
+ * @dst_staprobe_idx: Holds index of Status Q to be examined for updates
+ * @dst_sta_hw_probe_idx: Holds index of max limit of Dst Status Q for hardware
+ * @read_attribute: Describes the attributes of buffer in srcq
+ * @write_attribute: Describes the attributes of buffer in dstq
+ * @intr_status_offset: Register offset to be checked on receiving interrupt
+ * @intr_control_offset: Register offset to be used to control interrupts
+ * @ppkt_ctx_srcq: Virtual address of packet context to Src Q updates
+ * @idx_ctx_srcq_head: Holds index of packet context to be filled for Source Q
+ * @idx_ctx_srcq_tail: Holds index of packet context to be examined for Source Q
+ * @ppkt_ctx_dstq: Virtual address of packet context to Dst Q updates
+ * @idx_ctx_dstq_head: Holds index of packet context to be filled for Dst Q
+ * @idx_ctx_dstq_tail: Holds index of packet context to be examined for Dst Q
+ * @pending_list_lock: Lock to be taken before updating pending transfers list
+ * @pending_list: List of transactions submitted to channel
+ * @active_list_lock: Lock to be taken before transferring transactions from
+ * pending list to active list which will be subsequently
+ * submitted to hardware
+ * @active_list: List of transactions that will be submitted to hardware
+ * @pending_interrupts_lock: Lock to be taken before updating pending Intr list
+ * @pending_interrupts_list: List of interrupt transactions submitted to channel
+ * @active_interrupts_lock: Lock to be taken before transferring transactions
+ * from pending interrupt list to active interrupt list
+ * @active_interrupts_list: List of interrupt transactions that are active
+ * @transactions_pool: Mem pool to allocate dma transactions quickly
+ * @tx_elements_pool: Mem pool to allocate transfer elements quickly
+ * @intr_transactions_pool: Mem pool to allocate interrupt transactions quickly
+ * @sw_intrs_wrkq: Work Q which performs handling of software intrs
+ * @handle_sw_intrs: Work function handling software interrupts
+ * @maintenance_workq: Work Q to perform maintenance tasks during stop or error
+ * @handle_chan_reset: Work that invokes channel reset function
+ * @handle_chan_shutdown: Work that invokes channel shutdown function
+ * @handle_chan_terminate: Work that invokes channel transactions termination
+ * @chan_shutdown_complt: Completion variable which says shutdown is done
+ * @chan_terminate_complete: Completion variable which says terminate is done
+ * @primary_desc_cleanup: Work Q which performs work related to sgl handling
+ * @handle_primary_desc_cleanup: Work that invokes src Q, dst Q cleanup
+ * and programming
+ * @chan_programming: Work Q which performs work related to channel programming
+ * @handle_chan_programming: Work that invokes channel programming function
+ * @srcq_desc_cleanup: Work Q which performs src Q descriptor cleanup
+ * @handle_srcq_desc_cleanup: Work function handling Src Q completions
+ * @dstq_desc_cleanup: Work Q which performs dst Q descriptor cleanup
+ * @handle_dstq_desc_cleanup: Work function handling Dst Q completions
+ * @srcq_work_complete: Src Q Work completion variable for primary work
+ * @dstq_work_complete: Dst Q Work completion variable for primary work
+ */
+struct ps_pcie_dma_chan {
+ struct xlnx_pcie_dma_device *xdev;
+ struct device *dev;
+
+ struct dma_chan common;
+
+ struct DMA_ENGINE_REGISTERS *chan_base;
+ u16 channel_number;
+
+ u32 num_queues;
+ enum dma_data_direction direction;
+ enum BUFFER_LOCATION srcq_buffer_location;
+ enum BUFFER_LOCATION dstq_buffer_location;
+
+ u32 total_descriptors;
+
+ enum CHANNEL_STATE state;
+ spinlock_t channel_lock; /* For changing channel state */
+
+ spinlock_t cookie_lock; /* For acquiring cookie from dma framework*/
+
+ u32 coalesce_count;
+ u32 poll_timer_freq;
+
+ struct timer_list poll_timer;
+
+ u32 src_avail_descriptors;
+ spinlock_t src_desc_lock; /* For handling srcq available descriptors */
+
+ u32 dst_avail_descriptors;
+ spinlock_t dst_desc_lock; /* For handling dstq available descriptors */
+
+ dma_addr_t src_sgl_bd_pa;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
+ u32 src_sgl_freeidx;
+
+ dma_addr_t dst_sgl_bd_pa;
+ struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
+ u32 dst_sgl_freeidx;
+
+ dma_addr_t src_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
+ u32 src_staprobe_idx;
+ u32 src_sta_hw_probe_idx;
+
+ dma_addr_t dst_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
+ u32 dst_staprobe_idx;
+ u32 dst_sta_hw_probe_idx;
+
+ u32 read_attribute;
+ u32 write_attribute;
+
+ u32 intr_status_offset;
+ u32 intr_control_offset;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
+ u16 idx_ctx_srcq_head;
+ u16 idx_ctx_srcq_tail;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
+ u16 idx_ctx_dstq_head;
+ u16 idx_ctx_dstq_tail;
+
+ spinlock_t pending_list_lock; /* For handling dma pending_list */
+ struct list_head pending_list;
+ spinlock_t active_list_lock; /* For handling dma active_list */
+ struct list_head active_list;
+
+ spinlock_t pending_interrupts_lock; /* For dma pending interrupts list*/
+ struct list_head pending_interrupts_list;
+ spinlock_t active_interrupts_lock; /* For dma active interrupts list*/
+ struct list_head active_interrupts_list;
+
+ mempool_t *transactions_pool;
+ mempool_t *tx_elements_pool;
+ mempool_t *intr_transactions_pool;
+
+ struct workqueue_struct *sw_intrs_wrkq;
+ struct work_struct handle_sw_intrs;
+
+ struct workqueue_struct *maintenance_workq;
+ struct work_struct handle_chan_reset;
+ struct work_struct handle_chan_shutdown;
+ struct work_struct handle_chan_terminate;
+
+ struct completion chan_shutdown_complt;
+ struct completion chan_terminate_complete;
+
+ struct workqueue_struct *primary_desc_cleanup;
+ struct work_struct handle_primary_desc_cleanup;
+
+ struct workqueue_struct *chan_programming;
+ struct work_struct handle_chan_programming;
+
+ struct workqueue_struct *srcq_desc_cleanup;
+ struct work_struct handle_srcq_desc_cleanup;
+ struct completion srcq_work_complete;
+
+ struct workqueue_struct *dstq_desc_cleanup;
+ struct work_struct handle_dstq_desc_cleanup;
+ struct completion dstq_work_complete;
+};
+
+/**
+ * struct xlnx_pcie_dma_device - Driver specific platform device structure
+ * @is_rootdma: Indicates whether the dma instance is root port dma
+ * @dma_buf_ext_addr: Indicates whether target system is 32 bit or 64 bit
+ * @bar_mask: Indicates available pcie bars
+ * @board_number: Count value of platform device
+ * @dev: Device structure pointer for pcie device
+ * @channels: Pointer to device DMA channels structure
+ * @common: DMA device structure
+ * @num_channels: Number of channels active for the device
+ * @reg_base: Base address of first DMA channel of the device
+ * @irq_vecs: Number of irq vectors allocated to pci device
+ * @pci_dev: Parent pci device which created this platform device
+ * @bar_info: PCIe bar related information
+ * @platform_irq_vec: Platform irq vector number for root dma
+ * @rootdma_vendor: PCI Vendor id for root dma
+ * @rootdma_device: PCI Device id for root dma
+ */
+struct xlnx_pcie_dma_device {
+ bool is_rootdma;
+ bool dma_buf_ext_addr;
+ u32 bar_mask;
+ u16 board_number;
+ struct device *dev;
+ struct ps_pcie_dma_chan *channels;
+ struct dma_device common;
+ int num_channels;
+ int irq_vecs;
+ void __iomem *reg_base;
+ struct pci_dev *pci_dev;
+ struct BAR_PARAMS bar_info[MAX_BARS];
+ int platform_irq_vec;
+ u16 rootdma_vendor;
+ u16 rootdma_device;
+};
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct ps_pcie_dma_chan, common)
+#define to_ps_pcie_dma_tx_descriptor(tx) \
+ container_of(tx, struct ps_pcie_tx_segment, async_tx)
+#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
+ container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
+
+/* Function Prototypes */
+static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
+static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value);
+static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static int irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int irq_probe(struct xlnx_pcie_dma_device *xdev);
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
+static int init_hw_components(struct ps_pcie_dma_chan *chan);
+static int init_sw_components(struct ps_pcie_dma_chan *chan);
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
+static void poll_completed_transactions(struct timer_list *t);
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void handle_error(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void ps_pcie_chan_program_work(struct work_struct *work);
+static void dst_cleanup_work(struct work_struct *work);
+static void src_cleanup_work(struct work_struct *work);
+static void ps_pcie_chan_primary_work(struct work_struct *work);
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number);
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
+static void terminate_transactions_work(struct work_struct *work);
+static void chan_shutdown_work(struct work_struct *work);
+static void chan_reset_work(struct work_struct *work);
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
+static struct dma_async_tx_descriptor *
+xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len,
+ unsigned long flags);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags);
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
+
+/* IO accessors */
+static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
+{
+ return ioread32((void __iomem *)((char *)(chan->chan_base) + reg));
+}
+
+static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ iowrite32(value, (void __iomem *)((char *)(chan->chan_base) + reg));
+}
+
+static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) & ~mask);
+}
+
+static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask)
+{
+ ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) | mask);
+}
+
+/**
+ * ps_pcie_dma_dev_intr_handler - This will be invoked for MSI/Legacy interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA device structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ (struct xlnx_pcie_dma_device *)data;
+ struct ps_pcie_dma_chan *chan = NULL;
+ int i;
+ int err = -1;
+ int ret = -1;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = ps_pcie_check_intr_status(chan);
+ if (err == 0)
+ ret = 0;
+ }
+
+ return (ret == 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ps_pcie_dma_chan_intr_handler - This will be invoked for MSI-X interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
+{
+ struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)data;
+
+ ps_pcie_check_intr_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * chan_intr_setup - Requests Interrupt handler for individual channels
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ struct ps_pcie_dma_chan *chan;
+ int i;
+ int err = 0;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ chan = &xdev->channels[i];
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i),
+ ps_pcie_dma_chan_intr_handler,
+ PS_PCIE_DMA_IRQ_NOSHARE,
+ "PS PCIe DMA Chan Intr handler", chan);
+ if (err) {
+ dev_err(xdev->dev,
+ "Irq %d for chan %d error %d\n",
+ pci_irq_vector(xdev->pci_dev, i),
+ chan->channel_number, err);
+ break;
+ }
+ }
+
+ if (err) {
+ while (--i >= 0) {
+ chan = &xdev->channels[i];
+ devm_free_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, i), chan);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * device_intr_setup - Requests interrupt handler for DMA device
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ unsigned long intr_flags = IRQF_SHARED;
+
+ if (xdev->pci_dev->msix_enabled || xdev->pci_dev->msi_enabled)
+ intr_flags = PS_PCIE_DMA_IRQ_NOSHARE;
+
+ err = devm_request_irq(xdev->dev,
+ pci_irq_vector(xdev->pci_dev, 0),
+ ps_pcie_dma_dev_intr_handler,
+ intr_flags,
+ "PS PCIe DMA Intr Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ pci_irq_vector(xdev->pci_dev, 0));
+
+ return err;
+}
+
+/**
+ * irq_setup - Requests interrupts based on the interrupt type detected
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
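+ /*
+ * One vector per channel means per-channel handlers (MSI-X);
+ * otherwise a single device level handler checks all channels.
+ */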
+ if (xdev->irq_vecs == xdev->num_channels)
+ err = chan_intr_setup(xdev);
+ else
+ err = device_intr_setup(xdev);
+
+ return err;
+}
+
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+
+ err = devm_request_irq(xdev->dev,
+ xdev->platform_irq_vec,
+ ps_pcie_dma_dev_intr_handler,
+ IRQF_SHARED,
+ "PS PCIe Root DMA Handler", xdev);
+ if (err)
+ dev_err(xdev->dev, "Couldn't request irq %d\n",
+ xdev->platform_irq_vec);
+
+ return err;
+}
+
+/**
+ * irq_probe - Checks which interrupt types can be serviced by hardware
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: Number of allocated interrupt vectors on success or negative errno on failure
+ */
+static int irq_probe(struct xlnx_pcie_dma_device *xdev)
+{
+ struct pci_dev *pdev;
+
+ pdev = xdev->pci_dev;
+
+ xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
+ PCI_IRQ_ALL_TYPES);
+ return xdev->irq_vecs;
+}
+
+/**
+ * ps_pcie_check_intr_status - Checks channel interrupt status
+ *
+ * @chan: Pointer to the PS PCIe DMA channel structure
+ *
+ * Return: 0 if interrupt is pending on channel
+ * -1 if no interrupt is pending on channel
+ */
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
+{
+ int err = -1;
+ u32 status;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return err;
+
+ status = ps_pcie_dma_read(chan, chan->intr_status_offset);
+
+ if (status & DMA_INTSTATUS_SGLINTR_BIT) {
+ if (chan->primary_desc_cleanup) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SGLINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_SWINTR_BIT) {
+ if (chan->sw_intrs_wrkq)
+ queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_SWINTR_BIT);
+ err = 0;
+ }
+
+ if (status & DMA_INTSTATUS_DMAERR_BIT) {
+ dev_err(chan->dev,
+ "DMA Channel %d ControlStatus Reg: 0x%x",
+ chan->channel_number, status);
+ dev_err(chan->dev,
+ "Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->src_q_limit,
+ chan->chan_base->src_q_size,
+ chan->chan_base->src_q_next);
+ dev_err(chan->dev,
+ "Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stas_q_limit,
+ chan->chan_base->stas_q_size,
+ chan->chan_base->stas_q_next);
+ dev_err(chan->dev,
+ "Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
+ chan->channel_number,
+ chan->chan_base->dst_q_limit,
+ chan->chan_base->dst_q_size,
+ chan->chan_base->dst_q_next);
+ dev_err(chan->dev,
+ "Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
+ chan->channel_number,
+ chan->chan_base->stad_q_limit,
+ chan->chan_base->stad_q_size,
+ chan->chan_base->stad_q_next);
+ /* Clearing Persistent bit */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT);
+
+ handle_error(chan);
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int init_hw_components(struct ps_pcie_dma_chan *chan)
+{
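+ /*
+ * SGL queues start with limit == next (empty) and software advances
+ * the limit as descriptors are built; status queues get
+ * limit = size - 1 so hardware can write every status element.
+ */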
+ if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
+ /* Programming SourceQ and StatusQ bd addresses */
+ chan->chan_base->src_q_next = 0;
+ chan->chan_base->src_q_high =
+ upper_32_bits(chan->src_sgl_bd_pa);
+ chan->chan_base->src_q_size = chan->total_descriptors;
+ chan->chan_base->src_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->src_q_low = 0;
+ }
+ chan->chan_base->src_q_low |=
+ (lower_32_bits((chan->src_sgl_bd_pa))
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stas_q_next = 0;
+ chan->chan_base->stas_q_high =
+ upper_32_bits(chan->src_sta_bd_pa);
+ chan->chan_base->stas_q_size = chan->total_descriptors;
+ chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stas_q_low = 0;
+ }
+ chan->chan_base->stas_q_low |=
+ (lower_32_bits(chan->src_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
+ /* Programming DestinationQ and StatusQ buffer descriptors */
+ chan->chan_base->dst_q_next = 0;
+ chan->chan_base->dst_q_high =
+ upper_32_bits(chan->dst_sgl_bd_pa);
+ chan->chan_base->dst_q_size = chan->total_descriptors;
+ chan->chan_base->dst_q_limit = 0;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->dst_q_low = 0;
+ }
+ chan->chan_base->dst_q_low |=
+ (lower_32_bits(chan->dst_sgl_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+
+ chan->chan_base->stad_q_next = 0;
+ chan->chan_base->stad_q_high =
+ upper_32_bits(chan->dst_sta_bd_pa);
+ chan->chan_base->stad_q_size = chan->total_descriptors;
+ chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
+ if (chan->xdev->is_rootdma) {
+ chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+ | DMA_QPTRLO_QLOCAXI_BIT;
+ } else {
+ chan->chan_base->stad_q_low = 0;
+ }
+ chan->chan_base->stad_q_low |=
+ (lower_32_bits(chan->dst_sta_bd_pa)
+ & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+ | DMA_QPTRLO_Q_ENABLE_BIT;
+ }
+
+ return 0;
+}
+
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ chan->read_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
+ chan->read_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+}
+
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->xdev->is_rootdma) {
+ /* For Root DMA, Host Memory and Buffer Descriptors
+ * will be on AXI side
+ */
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ }
+ } else {
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ chan->write_attribute = PCI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT;
+ } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+ chan->write_attribute = (AXI_ATTRIBUTE <<
+ SRC_CTL_ATTRIB_BIT_SHIFT) |
+ SOURCE_CONTROL_BD_LOC_AXI;
+ }
+ }
+ chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
+}
+
+static int init_sw_components(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->ppkt_ctx_srcq && chan->psrc_sgl_bd &&
+ chan->psrc_sta_bd) {
+ memset(chan->ppkt_ctx_srcq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sgl_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->psrc_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->src_avail_descriptors = chan->total_descriptors;
+
+ chan->src_sgl_freeidx = 0;
+ chan->src_staprobe_idx = 0;
+ chan->src_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_srcq_head = 0;
+ chan->idx_ctx_srcq_tail = 0;
+ }
+
+ if (chan->ppkt_ctx_dstq && chan->pdst_sgl_bd &&
+ chan->pdst_sta_bd) {
+ memset(chan->ppkt_ctx_dstq, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sgl_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ memset(chan->pdst_sta_bd, 0,
+ sizeof(struct STATUS_DMA_DESCRIPTOR)
+ * chan->total_descriptors);
+
+ chan->dst_avail_descriptors = chan->total_descriptors;
+
+ chan->dst_sgl_freeidx = 0;
+ chan->dst_staprobe_idx = 0;
+ chan->dst_sta_hw_probe_idx = chan->total_descriptors - 1;
+ chan->idx_ctx_dstq_head = 0;
+ chan->idx_ctx_dstq_tail = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * ps_pcie_chan_reset - Resets channel, by programming relevant registers
+ *
+ * @chan: PS PCIe DMA channel information holder
+ * Return: void
+ */
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
+{
+ /* Enable channel reset */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+
+ mdelay(10);
+
+ /* Disable channel reset */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+}
+
+/**
+ * poll_completed_transactions - Function invoked by poll timer
+ *
+ * @t: Pointer to timer triggering this callback
+ * Return: void
+ */
+static void poll_completed_transactions(struct timer_list *t)
+{
+ struct ps_pcie_dma_chan *chan = from_timer(chan, t, poll_timer);
+
+ if (chan->state == CHANNEL_AVAILABLE) {
+ queue_work(chan->primary_desc_cleanup,
+ &chan->handle_primary_desc_cleanup);
+ }
+
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (seg->src_elements) {
+ if (chan->src_avail_descriptors >=
+ seg->src_elements) {
+ return true;
+ }
+ } else if (seg->dst_elements) {
+ if (chan->dst_avail_descriptors >=
+ seg->dst_elements) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (chan->src_avail_descriptors >=
+ seg->src_elements &&
+ chan->dst_avail_descriptors >=
+ seg->dst_elements) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ if (chan->num_queues == DEFAULT_DMA_QUEUES)
+ return check_descriptors_for_all_queues(chan, seg);
+ else
+ return check_descriptors_for_two_queues(chan, seg);
+}
+
+static void handle_error(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->state != CHANNEL_AVAILABLE)
+ return;
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_ERROR;
+ spin_unlock(&chan->channel_lock);
+
+ if (chan->maintenance_workq)
+ queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
+}
+
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct SOURCE_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ u32 i = 0;
+
+ pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "src pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+
+ if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ /* Get the address of the next available DMA Descriptor */
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->src_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ list_for_each_entry(ele, &seg->transfer_nodes, node) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)ele->src_pa;
+ } else {
+ pdesc->system_address =
+ (u32)ele->src_pa;
+ }
+
+ pdesc->control_byte_count = (ele->transfer_bytes &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->read_attribute;
+
+ pdesc->user_handle = chan->idx_ctx_srcq_head;
+ pdesc->user_id = DEFAULT_UID;
+ /* Check if this is last descriptor */
+ if (i == (seg->src_elements - 1)) {
+ pkt_ctx->idx_eop = chan->src_sgl_freeidx;
+ pdesc->control_byte_count |= SOURCE_CONTROL_BD_EOP_BIT;
+ if ((seg->async_tx.flags & DMA_PREP_INTERRUPT) ==
+ DMA_PREP_INTERRUPT) {
+ pdesc->control_byte_count |=
+ SOURCE_CONTROL_BD_INTR_BIT;
+ }
+ }
+ chan->src_sgl_freeidx++;
+ if (chan->src_sgl_freeidx == chan->total_descriptors)
+ chan->src_sgl_freeidx = 0;
+ pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors--;
+ spin_unlock(&chan->src_desc_lock);
+ i++;
+ }
+
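+ /*
+ * Advancing src_q_limit past the freshly built descriptors hands
+ * them to hardware for processing.
+ */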
+ chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
+ chan->idx_ctx_srcq_head++;
+ if (chan->idx_ctx_srcq_head == chan->total_descriptors)
+ chan->idx_ctx_srcq_head = 0;
+}
+
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg)
+{
+ struct DEST_DMA_DESCRIPTOR *pdesc;
+ struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ u32 i = 0;
+
+ pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
+ if (pkt_ctx->availability_status == IN_USE) {
+ dev_err(chan->dev,
+ "dst pkt context not avail for channel %d\n",
+ chan->channel_number);
+ handle_error(chan);
+
+ return;
+ }
+
+ pkt_ctx->availability_status = IN_USE;
+
+ if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
+ pkt_ctx->seg = seg;
+
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
+
+ /* Build transactions using information in the scatter gather list */
+ list_for_each_entry(ele, &seg->transfer_nodes, node) {
+ if (chan->xdev->dma_buf_ext_addr) {
+ pdesc->system_address =
+ (u64)ele->dst_pa;
+ } else {
+ pdesc->system_address =
+ (u32)ele->dst_pa;
+ }
+ pdesc->control_byte_count = (ele->transfer_bytes &
+ SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+ chan->write_attribute;
+
+ pdesc->user_handle = chan->idx_ctx_dstq_head;
+ /* Check if this is last descriptor */
+ if (i == (seg->dst_elements - 1))
+ pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
+ chan->dst_sgl_freeidx++;
+ if (chan->dst_sgl_freeidx == chan->total_descriptors)
+ chan->dst_sgl_freeidx = 0;
+ pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors--;
+ spin_unlock(&chan->dst_desc_lock);
+ i++;
+ }
+
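+ /* Hand the newly built descriptors over to hardware */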
+ chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
+ chan->idx_ctx_dstq_head++;
+ if (chan->idx_ctx_dstq_head == chan->total_descriptors)
+ chan->idx_ctx_dstq_head = 0;
+}
+
+static void ps_pcie_chan_program_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan,
+ handle_chan_programming);
+ struct ps_pcie_tx_segment *seg = NULL;
+
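+ /*
+ * Program segments from the active list until descriptors run
+ * out; descriptor cleanup work requeues channel programming once
+ * BDs are freed.
+ */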
+ while (chan->state == CHANNEL_AVAILABLE) {
+ spin_lock(&chan->active_list_lock);
+ seg = list_first_entry_or_null(&chan->active_list,
+ struct ps_pcie_tx_segment, node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (!seg)
+ break;
+
+ if (check_descriptor_availability(chan, seg) == false)
+ break;
+
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+
+ if (seg->src_elements)
+ xlnx_ps_pcie_update_srcq(chan, seg);
+
+ if (seg->dst_elements)
+ xlnx_ps_pcie_update_dstq(chan, seg);
+ }
+}
+
+/**
+ * dst_cleanup_work - Goes through all completed elements in status Q
+ * and invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void dst_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct DEST_DMA_DESCRIPTOR *pdst_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 dstq_desc_idx;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Destination Err",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Source Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d chan %d has Internal Error",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ /* we are using 64 bit USER field. */
+ if ((psta_bd->status_flag_byte_count &
+ STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+ "Dst Sts Elmnt %d for chan %d has NON ZERO",
+ chan->dst_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+
+ chan->idx_ctx_dstq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count &
+ STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->dst_staprobe_idx++;
+
+ if (chan->dst_staprobe_idx == chan->total_descriptors)
+ chan->dst_staprobe_idx = 0;
+
+ chan->dst_sta_hw_probe_idx++;
+
+ if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
+ chan->dst_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
+
+ psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+ dstq_desc_idx = ppkt_ctx->idx_sop;
+
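+ /* Recycle every destination BD of this packet, from SOP to EOP */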
+ do {
+ pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
+ memset(pdst_bd, 0,
+ sizeof(struct DEST_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->dst_desc_lock);
+ chan->dst_avail_descriptors++;
+ spin_unlock(&chan->dst_desc_lock);
+
+ if (dstq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+
+ dstq_desc_idx++;
+
+ if (dstq_desc_idx == chan->total_descriptors)
+ dstq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &ppkt_ctx->seg->transfer_nodes,
+ node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->dstq_work_complete);
+}
+
+/**
+ * src_cleanup_work - Goes through all completed elements in status Q and
+ * invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void src_cleanup_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
+
+ struct STATUS_DMA_DESCRIPTOR *psta_bd;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+ struct dmaengine_result rslt;
+ u32 completed_bytes;
+ u32 srcq_desc_idx;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_DESTINATION_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Dst Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Source Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if (psta_bd->status_flag_byte_count &
+ STA_BD_INTERNAL_ERROR_BIT) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has Internal Error",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ if ((psta_bd->status_flag_byte_count
+ & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+ dev_err(chan->dev,
+ "Src Sts Elmnt %d chan %d has NonZero",
+ chan->src_staprobe_idx + 1,
+ chan->channel_number);
+ handle_error(chan);
+ break;
+ }
+ chan->idx_ctx_srcq_tail = psta_bd->user_handle;
+ ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
+ completed_bytes = (psta_bd->status_flag_byte_count
+ & STA_BD_BYTE_COUNT_MASK) >>
+ STA_BD_BYTE_COUNT_SHIFT;
+
+ memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+ chan->src_staprobe_idx++;
+
+ if (chan->src_staprobe_idx == chan->total_descriptors)
+ chan->src_staprobe_idx = 0;
+
+ chan->src_sta_hw_probe_idx++;
+
+ if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
+ chan->src_sta_hw_probe_idx = 0;
+
+ chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
+
+ psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+ srcq_desc_idx = ppkt_ctx->idx_sop;
+
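+ /* Recycle every source BD of this packet, from SOP to EOP */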
+ do {
+ psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
+ memset(psrc_bd, 0,
+ sizeof(struct SOURCE_DMA_DESCRIPTOR));
+
+ spin_lock(&chan->src_desc_lock);
+ chan->src_avail_descriptors++;
+ spin_unlock(&chan->src_desc_lock);
+
+ if (srcq_desc_idx == ppkt_ctx->idx_eop)
+ break;
+ srcq_desc_idx++;
+
+ if (srcq_desc_idx == chan->total_descriptors)
+ srcq_desc_idx = 0;
+
+ } while (1);
+
+ /* Invoking callback */
+ if (ppkt_ctx->seg) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+ rslt.result = DMA_TRANS_NOERROR;
+ rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
+ completed_bytes;
+ dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+ &rslt);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &ppkt_ctx->seg->transfer_nodes,
+ node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+ }
+ memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+ }
+
+ complete(&chan->srcq_work_complete);
+}
+
+/**
+ * ps_pcie_chan_primary_work - Masks out interrupts, invokes source Q and
+ * destination Q processing, waits for both to complete and re-enables
+ * interrupts. The same work is invoked by the poll timer if coalesce count
+ * is greater than zero and no interrupt arrives before the timeout period
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void ps_pcie_chan_primary_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(
+ work, struct ps_pcie_dma_chan,
+ handle_primary_desc_cleanup);
+
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->psrc_sgl_bd) {
+ reinit_completion(&chan->srcq_work_complete);
+ if (chan->srcq_desc_cleanup)
+ queue_work(chan->srcq_desc_cleanup,
+ &chan->handle_srcq_desc_cleanup);
+ }
+ if (chan->pdst_sgl_bd) {
+ reinit_completion(&chan->dstq_work_complete);
+ if (chan->dstq_desc_cleanup)
+ queue_work(chan->dstq_desc_cleanup,
+ &chan->handle_dstq_desc_cleanup);
+ }
+
+ if (chan->psrc_sgl_bd)
+ wait_for_completion_interruptible(&chan->srcq_work_complete);
+ if (chan->pdst_sgl_bd)
+ wait_for_completion_interruptible(&chan->dstq_work_complete);
+
+ /* Enable interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ if (chan->chan_programming) {
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+ }
+
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct resource *r;
+
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
+ err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "DMA mask set error\n");
+ return err;
+ }
+ }
+
+ err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
+ err = dma_set_coherent_mask(&platform_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
+ return err;
+ }
+ }
+
+ r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
+ "ps_pcie_regbase");
+ if (!r) {
+ dev_err(&platform_dev->dev,
+ "Unable to find memory resource for root dma\n");
+ return -ENODEV;
+ }
+
+ xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
+ if (IS_ERR(xdev->reg_base)) {
+ dev_err(&platform_dev->dev, "ioresource error for root dma\n");
+ return PTR_ERR(xdev->reg_base);
+ }
+
+ xdev->platform_irq_vec =
+ platform_get_irq_byname(platform_dev,
+ "ps_pcie_rootdma_intr");
+ if (xdev->platform_irq_vec < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to get interrupt number for root dma\n");
+ return xdev->platform_irq_vec;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
+ &xdev->rootdma_vendor);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Vendor Id\n");
+ return err;
+ }
+
+ err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
+ &xdev->rootdma_device);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find RootDMA PCI Device Id\n");
+ return err;
+ }
+
+ xdev->common.dev = xdev->dev;
+
+ return 0;
+}
+
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev)
+{
+ int err;
+ struct pci_dev *pdev;
+ u16 i;
+ void __iomem * const *pci_iomap;
+ unsigned long pci_bar_length;
+
+ pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
+ xdev->pci_dev = pdev;
+
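+ /* Record which BARs are implemented (nonzero length) */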
+ for (i = 0; i < MAX_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ xdev->bar_mask = xdev->bar_mask | (1 << (i));
+ }
+
+ err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
+ return err;
+ }
+
+ pci_iomap = pcim_iomap_table(pdev);
+ if (!pci_iomap) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ for (i = 0; i < MAX_BARS; i++) {
+ pci_bar_length = pci_resource_len(pdev, i);
+ if (pci_bar_length == 0) {
+ xdev->bar_info[i].BAR_LENGTH = 0;
+ xdev->bar_info[i].BAR_PHYS_ADDR = 0;
+ xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
+ } else {
+ xdev->bar_info[i].BAR_LENGTH =
+ pci_bar_length;
+ xdev->bar_info[i].BAR_PHYS_ADDR =
+ pci_resource_start(pdev, i);
+ xdev->bar_info[i].BAR_VIRT_ADDR =
+ (void *)pci_iomap[i];
+ }
+ }
+
+ xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
+
+ err = irq_probe(xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
+ platform_dev->id);
+ return err;
+ }
+
+ xdev->common.dev = &pdev->dev;
+
+ return 0;
+}
+
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number)
+{
+ int i;
+ char propertyname[CHANNEL_PROPERTY_LENGTH];
+ int numvals, ret;
+ u32 *val;
+ struct ps_pcie_dma_chan *channel;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+
+ snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
+ "ps_pcie_channel%d", channel_number);
+
+ channel = &xdev->channels[channel_number];
+
+ spin_lock_init(&channel->channel_lock);
+ spin_lock_init(&channel->cookie_lock);
+
+ INIT_LIST_HEAD(&channel->pending_list);
+ spin_lock_init(&channel->pending_list_lock);
+
+ INIT_LIST_HEAD(&channel->active_list);
+ spin_lock_init(&channel->active_list_lock);
+
+ spin_lock_init(&channel->src_desc_lock);
+ spin_lock_init(&channel->dst_desc_lock);
+
+ INIT_LIST_HEAD(&channel->pending_interrupts_list);
+ spin_lock_init(&channel->pending_interrupts_lock);
+
+ INIT_LIST_HEAD(&channel->active_interrupts_list);
+ spin_lock_init(&channel->active_interrupts_lock);
+
+ init_completion(&channel->srcq_work_complete);
+ init_completion(&channel->dstq_work_complete);
+ init_completion(&channel->chan_shutdown_complt);
+ init_completion(&channel->chan_terminate_complete);
+
+ if (device_property_present(&platform_dev->dev, propertyname)) {
+ numvals = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, NULL, 0);
+
+ if (numvals < 0)
+ return numvals;
+
+ val = devm_kcalloc(&platform_dev->dev, numvals, sizeof(u32),
+ GFP_KERNEL);
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, val,
+ numvals);
+ if (ret < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to read property %s\n", propertyname);
+ return ret;
+ }
+
+ for (i = 0; i < numvals; i++) {
+ switch (i) {
+ case DMA_CHANNEL_DIRECTION:
+ channel->direction =
+ (val[DMA_CHANNEL_DIRECTION] ==
+ PCIE_AXI_DIRECTION) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ break;
+ case NUM_DESCRIPTORS:
+ channel->total_descriptors =
+ val[NUM_DESCRIPTORS];
+ if (channel->total_descriptors >
+ MAX_DESCRIPTORS) {
+ dev_info(&platform_dev->dev,
+ "Descriptors > alowd max\n");
+ channel->total_descriptors =
+ MAX_DESCRIPTORS;
+ }
+ break;
+ case NUM_QUEUES:
+ channel->num_queues = val[NUM_QUEUES];
+ switch (channel->num_queues) {
+ case DEFAULT_DMA_QUEUES:
+ break;
+ case TWO_DMA_QUEUES:
+ break;
+ default:
+ dev_info(&platform_dev->dev,
+ "Incorrect Q number for dma chan\n");
+ channel->num_queues = DEFAULT_DMA_QUEUES;
+ }
+ break;
+ case COALESCE_COUNT:
+ channel->coalesce_count = val[COALESCE_COUNT];
+
+ if (channel->coalesce_count >
+ MAX_COALESCE_COUNT) {
+ dev_info(&platform_dev->dev,
+ "Invalid coalesce Count\n");
+ channel->coalesce_count =
+ MAX_COALESCE_COUNT;
+ }
+ break;
+ case POLL_TIMER_FREQUENCY:
+ channel->poll_timer_freq =
+ val[POLL_TIMER_FREQUENCY];
+ break;
+ default:
+ dev_err(&platform_dev->dev,
+ "Check order of channel properties!\n");
+ }
+ }
+ } else {
+ dev_err(&platform_dev->dev,
+ "Property %s not present. Invalid configuration!\n",
+ propertyname);
+ return -ENOTSUPP;
+ }
+
+ if (channel->direction == DMA_TO_DEVICE) {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_AXI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_INVALID;
+ }
+ } else {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_AXI;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_INVALID;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ }
+ }
+
+ channel->xdev = xdev;
+ channel->channel_number = channel_number;
+
+ if (xdev->is_rootdma) {
+ channel->dev = xdev->dev;
+ channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
+ } else {
+ channel->dev = &xdev->pci_dev->dev;
+ channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
+ }
+
+ channel->chan_base =
+ (struct DMA_ENGINE_REGISTERS *)((__force char *)(xdev->reg_base) +
+ (channel_number * DMA_CHANNEL_REGS_SIZE));
+
+ if ((channel->chan_base->dma_channel_status &
+ DMA_STATUS_DMA_PRES_BIT) == 0) {
+ dev_err(&platform_dev->dev,
+ "Hardware reports channel not present\n");
+ return -ENOTSUPP;
+ }
+
+ update_channel_read_attribute(channel);
+ update_channel_write_attribute(channel);
+
+ xlnx_match = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct ps_pcie_dma_channel_match),
+ GFP_KERNEL);
+
+ if (!xlnx_match)
+ return -ENOMEM;
+
+ if (xdev->is_rootdma) {
+ xlnx_match->pci_vendorid = xdev->rootdma_vendor;
+ xlnx_match->pci_deviceid = xdev->rootdma_device;
+ } else {
+ xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
+ xlnx_match->pci_deviceid = xdev->pci_dev->device;
+ xlnx_match->bar_params = xdev->bar_info;
+ }
+
+ xlnx_match->board_number = xdev->board_number;
+ xlnx_match->channel_number = channel_number;
+ xlnx_match->direction = xdev->channels[channel_number].direction;
+
+ channel->common.private = (void *)xlnx_match;
+
+ channel->common.device = &xdev->common;
+ list_add_tail(&channel->common.device_node, &xdev->common.channels);
+
+ return 0;
+}
+
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
+{
+ mempool_destroy(chan->transactions_pool);
+
+ mempool_destroy(chan->tx_elements_pool);
+
+ mempool_destroy(chan->intr_transactions_pool);
+}
+
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+
+ if (chan->dstq_desc_cleanup)
+ destroy_workqueue(chan->dstq_desc_cleanup);
+
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+}
+
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
+{
+ kfree(chan->ppkt_ctx_srcq);
+
+ kfree(chan->ppkt_ctx_dstq);
+}
+
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
+{
+ ssize_t size;
+
+ if (chan->psrc_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+ }
+
+ if (chan->psrc_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+ }
+
+ if (chan->pdst_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
+ chan->dst_sta_bd_pa);
+ }
+}
+
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
+{
+ u32 reg = chan->coalesce_count;
+
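+ /* Position coalesce count in its interrupt control register field */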
+ reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;
+
+ /* Enable Interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ reg | DMA_INTCNTRL_ENABLINTR_BIT |
+ DMA_INTCNTRL_DMAERRINTR_BIT |
+ DMA_INTCNTRL_DMASGINTR_BIT);
+
+ /* Enable DMA */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
+ DMA_CNTRL_ENABL_BIT |
+ DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_AVAILABLE;
+ spin_unlock(&chan->channel_lock);
+
+ /* Activate timer if required */
+ if (chan->coalesce_count > 0 && !chan->poll_timer.function)
+ xlnx_ps_pcie_alloc_poll_timer(chan);
+
+ return 0;
+}
+
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
+{
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ /* Delete timer if it is created */
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ xlnx_ps_pcie_free_poll_timer(chan);
+
+ /* Flush descriptor cleaning work queues */
+ if (chan->primary_desc_cleanup)
+ flush_workqueue(chan->primary_desc_cleanup);
+
+ /* Flush channel programming work queue */
+ if (chan->chan_programming)
+ flush_workqueue(chan->chan_programming);
+
+ /* Clear the persistent bits */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT |
+ DMA_INTSTATUS_SGLINTR_BIT |
+ DMA_INTSTATUS_SWINTR_BIT);
+
+ /* Disable DMA channel */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_UNAVAILABLE;
+ spin_unlock(&chan->channel_lock);
+}
+
+static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt;
+
+ rslt.result = result;
+ rslt.residue = 0;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
+}
+
+static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt, *prslt;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ rslt.result = result;
+ if (seg->src_elements &&
+ chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue = seg->total_transfer_bytes;
+ prslt = &rslt;
+ } else if (seg->dst_elements &&
+ chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue = seg->total_transfer_bytes;
+ prslt = &rslt;
+ } else {
+ prslt = NULL;
+ }
+
+ dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
+}
+
+static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ if (ppkt_ctxt->availability_status == IN_USE) {
+ if (ppkt_ctxt->seg) {
+ ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
+ mempool_free(ppkt_ctxt->seg,
+ chan->transactions_pool);
+ }
+ }
+}
+
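+/*
+ * ivk_cbk_for_pending - Invokes failure/aborted callbacks for all in-flight
+ * packet contexts and for every transaction on the active, pending and
+ * interrupt lists, returning their resources to the mempools.
+ */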
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
+{
+ int i;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
+ struct ps_pcie_tx_segment *seg, *seg_nxt;
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ if (chan->ppkt_ctx_srcq) {
+ if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
+ i = chan->idx_ctx_srcq_tail;
+ while (i != chan->idx_ctx_srcq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_srcq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_READ_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ if (chan->ppkt_ctx_dstq) {
+ if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
+ i = chan->idx_ctx_dstq_tail;
+ while (i != chan->idx_ctx_dstq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_dstq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_WRITE_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->pending_list_lock);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->pending_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->pending_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+}
+
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
+{
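+ /* Quiesce the engine, complete or abort everything outstanding,
+ * reset the hardware, then rebuild state and re-enable the channel.
+ */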
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ ivk_cbk_for_pending(chan);
+
+ ps_pcie_chan_reset(chan);
+
+ init_sw_components(chan);
+ init_hw_components(chan);
+
+ xlnx_ps_pcie_channel_activate(chan);
+}
+
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->poll_timer.function) {
+ del_timer_sync(&chan->poll_timer);
+ chan->poll_timer.function = NULL;
+ }
+}
+
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ timer_setup(&chan->poll_timer, poll_completed_transactions, 0);
+ chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
+
+ add_timer(&chan->poll_timer);
+
+ return 0;
+}
+
+static void terminate_transactions_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_terminate);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+ ivk_cbk_for_pending(chan);
+ xlnx_ps_pcie_channel_activate(chan);
+
+ complete(&chan->chan_terminate_complete);
+}
+
+static void chan_shutdown_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_shutdown);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ complete(&chan->chan_shutdown_complt);
+}
+
+static void chan_reset_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_reset);
+
+ xlnx_ps_pcie_reset_channel(chan);
+}
+
+static void sw_intr_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_sw_intrs);
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
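+ /* Complete each outstanding software interrupt transaction and
+ * invoke its callback before dropping it from the active list.
+ */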
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
+ NULL);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ }
+}
+
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
+{
+ char wq_name[WORKQ_NAME_SIZE];
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d descriptor programming wq",
+ chan->channel_number);
+ chan->chan_programming =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->chan_programming) {
+ dev_err(chan->dev,
+ "Unable to create programming wq for chan %d",
+ chan->channel_number);
+ goto err_no_desc_program_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_programming,
+ ps_pcie_chan_program_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d primary cleanup wq", chan->channel_number);
+ chan->primary_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->primary_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create primary cleanup wq for channel %d",
+ chan->channel_number);
+ goto err_no_primary_clean_wq;
+ } else {
+ INIT_WORK(&chan->handle_primary_desc_cleanup,
+ ps_pcie_chan_primary_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d maintenance works wq",
+ chan->channel_number);
+ chan->maintenance_workq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->maintenance_workq) {
+ dev_err(chan->dev,
+ "Unable to create maintenance wq for channel %d",
+ chan->channel_number);
+ goto err_no_maintenance_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
+ INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
+ INIT_WORK(&chan->handle_chan_terminate,
+ terminate_transactions_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d software Interrupts wq",
+ chan->channel_number);
+ chan->sw_intrs_wrkq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->sw_intrs_wrkq) {
+ dev_err(chan->dev,
+ "Unable to create sw interrupts wq for channel %d",
+ chan->channel_number);
+ goto err_no_sw_intrs_wq;
+ } else {
+ INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ if (chan->psrc_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d srcq handling wq",
+ chan->channel_number);
+ chan->srcq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->srcq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create src q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_src_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_srcq_desc_cleanup,
+ src_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d dstq handling wq",
+ chan->channel_number);
+ chan->dstq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->dstq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create dst q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_dst_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_dstq_desc_cleanup,
+ dst_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ return 0;
+err_no_dst_q_completion_wq:
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+err_no_src_q_completion_wq:
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+err_no_sw_intrs_wq:
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+err_no_maintenance_wq:
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+err_no_primary_clean_wq:
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+err_no_desc_program_wq:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
+{
+ chan->transactions_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_tx_segment));
+
+ if (!chan->transactions_pool)
+ goto no_transactions_pool;
+
+ chan->tx_elements_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_transfer_elements));
+
+ if (!chan->tx_elements_pool)
+ goto no_tx_elements_pool;
+
+ chan->intr_transactions_pool =
+ mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
+ sizeof(struct ps_pcie_intr_segment));
+
+ if (!chan->intr_transactions_pool)
+ goto no_intr_transactions_pool;
+
+ return 0;
+
+no_intr_transactions_pool:
+ mempool_destroy(chan->tx_elements_pool);
+no_tx_elements_pool:
+ mempool_destroy(chan->transactions_pool);
+no_transactions_pool:
+ return -ENOMEM;
+}
+
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd) {
+ chan->ppkt_ctx_srcq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_srcq) {
+ dev_err(chan->dev,
+ "Src pkt cxt allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_src_pkt_ctx;
+ }
+ }
+
+ if (chan->pdst_sgl_bd) {
+ chan->ppkt_ctx_dstq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_dstq) {
+ dev_err(chan->dev,
+ "Dst pkt cxt for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_dst_pkt_ctx;
+ }
+ }
+
+ return 0;
+
+err_no_dst_pkt_ctx:
+ kfree(chan->ppkt_ctx_srcq);
+
+err_no_src_pkt_ctx:
+ return -ENOMEM;
+}
+
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ void *sgl_base;
+ void *sta_base;
+ dma_addr_t phy_addr_sglbase;
+ dma_addr_t phy_addr_stabase;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+
+ sgl_base = dma_alloc_coherent(chan->dev, size, &phy_addr_sglbase,
+ GFP_KERNEL);
+
+ if (!sgl_base) {
+ dev_err(chan->dev,
+ "Sgl bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sgl_bds;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ sta_base = dma_alloc_coherent(chan->dev, size, &phy_addr_stabase,
+ GFP_KERNEL);
+
+ if (!sta_base) {
+ dev_err(chan->dev,
+ "Sta bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sta_bds;
+ }
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ chan->psrc_sgl_bd = sgl_base;
+ chan->src_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->psrc_sta_bd = sta_base;
+ chan->src_sta_bd_pa = phy_addr_stabase;
+
+ chan->pdst_sgl_bd = NULL;
+ chan->dst_sgl_bd_pa = 0;
+
+ chan->pdst_sta_bd = NULL;
+ chan->dst_sta_bd_pa = 0;
+
+ } else if (chan->direction == DMA_FROM_DEVICE) {
+ chan->psrc_sgl_bd = NULL;
+ chan->src_sgl_bd_pa = 0;
+
+ chan->psrc_sta_bd = NULL;
+ chan->src_sta_bd_pa = 0;
+
+ chan->pdst_sgl_bd = sgl_base;
+ chan->dst_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->pdst_sta_bd = sta_base;
+ chan->dst_sta_bd_pa = phy_addr_stabase;
+
+ } else {
+ dev_err(chan->dev,
+ "%d %s() Unsupported channel direction\n",
+ __LINE__, __func__);
+ goto unsupported_channel_direction;
+ }
+
+ return 0;
+
+unsupported_channel_direction:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
+err_no_sta_bds:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
+err_no_sgl_bds:
+
+ return -ENOMEM;
+}
+
+static int dma_alloc_descriptors_all_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ chan->psrc_sgl_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail src q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
+ chan->pdst_sgl_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail dst q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ chan->psrc_sta_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate src q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sta_descriptors;
+ }
+
+ chan->pdst_sta_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate Dst q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sta_descriptors;
+ }
+
+ return 0;
+
+err_no_dst_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+err_no_src_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+err_no_dst_sgl_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+
+err_no_src_sgl_descriptors:
+ return -ENOMEM;
+}
+
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
+ return;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_shutdown_complt))
+ reinit_completion(&chan->chan_shutdown_complt);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_shutdown);
+ wait_for_completion_interruptible(&chan->chan_shutdown_complt);
+
+ xlnx_ps_pcie_free_worker_queues(chan);
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+ xlnx_ps_pcie_destroy_mempool(chan);
+ xlnx_ps_pcie_free_descriptors(chan);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_RESOURCE_UNALLOCATED;
+ spin_unlock(&chan->channel_lock);
+ }
+}
+
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return -EINVAL;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
+ return 0;
+
+ if (chan->num_queues == DEFAULT_DMA_QUEUES) {
+ if (dma_alloc_descriptors_all_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ } else if (chan->num_queues == TWO_DMA_QUEUES) {
+ if (dma_alloc_descriptors_two_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for two queues of channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ }
+
+ if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate memory pool for channel %d\n",
+ chan->channel_number);
+ goto err_no_mempools;
+ }
+
+ if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate packet contexts for channel %d\n",
+ chan->channel_number);
+ goto err_no_pkt_ctxts;
+ }
+
+ if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate worker queues for channel %d\n",
+ chan->channel_number);
+ goto err_no_worker_queues;
+ }
+
+ xlnx_ps_pcie_reset_channel(chan);
+
+ dma_cookie_init(dchan);
+
+ return 0;
+
+err_no_worker_queues:
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+err_no_pkt_ctxts:
+ xlnx_ps_pcie_destroy_mempool(chan);
+err_no_mempools:
+ xlnx_ps_pcie_free_descriptors(chan);
+err_no_descriptors:
+ return -ENOMEM;
+}
+
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_intr_segment *intr_seg =
+ to_ps_pcie_dma_tx_intr_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_interrupts_lock);
+ list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
+ spin_unlock(&chan->pending_interrupts_lock);
+
+ return cookie;
+}
+
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_list_lock);
+ list_add_tail(&seg->node, &chan->pending_list);
+ spin_unlock(&chan->pending_list_lock);
+
+ return cookie;
+}
+
+/**
+ * xlnx_ps_pcie_dma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @channel: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ struct ps_pcie_transfer_elements *ele_nxt = NULL;
+ u32 i;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (chan->num_queues != DEFAULT_DMA_QUEUES) {
+ dev_err(chan->dev, "Only prep_slave_sg for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+ dev_err(chan->dev, "Tx segment alloc for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+ INIT_LIST_HEAD(&seg->transfer_nodes);
+
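+ /* Split the copy into MAX_TRANSFER_LENGTH sized elements; any
+ * remainder is handled by one extra element after this loop.
+ */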
+ for (i = 0; i < len / MAX_TRANSFER_LENGTH; i++) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+ ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
+ ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
+ ele->transfer_bytes = MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->src_elements++;
+ seg->dst_elements++;
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ ele = NULL;
+ }
+
+ if (len % MAX_TRANSFER_LENGTH) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+ ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
+ ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
+ ele->transfer_bytes = len % MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->src_elements++;
+ seg->dst_elements++;
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ }
+
+ if (seg->src_elements > chan->total_descriptors) {
+ dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
+ chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+
+err_elements_prep_memcpy:
+ list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+ struct scatterlist *sgl_ptr;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ struct ps_pcie_transfer_elements *ele_nxt = NULL;
+ u32 i, j;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (!(is_slave_direction(direction)))
+ return NULL;
+
+ if (!sgl || sg_len == 0)
+ return NULL;
+
+ if (chan->num_queues != TWO_DMA_QUEUES) {
+ dev_err(chan->dev, "Only prep_dma_memcpy is supported channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+ dev_err(chan->dev, "Unable to allocate tx segment channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+
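+ /* Break each scatterlist entry into MAX_TRANSFER_LENGTH sized
+ * transfer elements, with a trailing element for any remainder.
+ */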
+ for_each_sg(sgl, sgl_ptr, sg_len, j) {
+ for (i = 0; i < sg_dma_len(sgl_ptr) / MAX_TRANSFER_LENGTH; i++) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_slave_sg;
+ }
+ if (chan->direction == DMA_TO_DEVICE) {
+ ele->src_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->src_elements++;
+ } else {
+ ele->dst_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->dst_elements++;
+ }
+ ele->transfer_bytes = MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ ele = NULL;
+ }
+ if (sg_dma_len(sgl_ptr) % MAX_TRANSFER_LENGTH) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_slave_sg;
+ }
+ if (chan->direction == DMA_TO_DEVICE) {
+ ele->src_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->src_elements++;
+ } else {
+ ele->dst_pa = sg_dma_address(sgl_ptr) +
+ (i * MAX_TRANSFER_LENGTH);
+ seg->dst_elements++;
+ }
+ ele->transfer_bytes = sg_dma_len(sgl_ptr) %
+ MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ }
+ }
+
+ if (max(seg->src_elements, seg->dst_elements) >
+ chan->total_descriptors) {
+ dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
+ chan->channel_number);
+ goto err_elements_prep_slave_sg;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+
+err_elements_prep_slave_sg:
+ list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ return NULL;
+}
+
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!channel)
+ return;
+
+ chan = to_xilinx_chan(channel);
+
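+ /* Splice pending transactions onto the active lists under both
+ * list locks, then kick the programming workqueue.
+ */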
+ if (!list_empty(&chan->pending_list)) {
+ spin_lock(&chan->pending_list_lock);
+ spin_lock(&chan->active_list_lock);
+ list_splice_tail_init(&chan->pending_list,
+ &chan->active_list);
+ spin_unlock(&chan->active_list_lock);
+ spin_unlock(&chan->pending_list_lock);
+ }
+
+ if (!list_empty(&chan->pending_interrupts_list)) {
+ spin_lock(&chan->pending_interrupts_lock);
+ spin_lock(&chan->active_interrupts_lock);
+ list_splice_tail_init(&chan->pending_interrupts_list,
+ &chan->active_interrupts_list);
+ spin_unlock(&chan->active_interrupts_lock);
+ spin_unlock(&chan->pending_interrupts_lock);
+ }
+
+ if (chan->chan_programming)
+ queue_work(chan->chan_programming,
+ &chan->handle_chan_programming);
+}
+
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!channel)
+ return -EINVAL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return 1;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_terminate_complete))
+ reinit_completion(&chan->chan_terminate_complete);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_terminate);
+ wait_for_completion_interruptible(
+ &chan->chan_terminate_complete);
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan;
+ struct ps_pcie_intr_segment *intr_segment = NULL;
+
+ if (!channel)
+ return NULL;
+
+ chan = to_xilinx_chan(channel);
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
+ if (!intr_segment) {
+ dev_err(chan->dev,
+ "Unable to allocate interrupt segment for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(intr_segment, 0, sizeof(*intr_segment));
+
+ dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
+ &chan->common);
+ intr_segment->async_intr_tx.flags = flags;
+ async_tx_ack(&intr_segment->async_intr_tx);
+ intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
+
+ return &intr_segment->async_intr_tx;
+}
+
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
+{
+ int err, i;
+ struct xlnx_pcie_dma_device *xdev;
+ static u16 board_number;
+
+ xdev = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
+
+ if (!xdev)
+ return -ENOMEM;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ xdev->dma_buf_ext_addr = true;
+#else
+ xdev->dma_buf_ext_addr = false;
+#endif
+
+ xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
+ "rootdma");
+
+ xdev->dev = &platform_dev->dev;
+ xdev->board_number = board_number;
+
+ err = device_property_read_u32(&platform_dev->dev, "numchannels",
+ &xdev->num_channels);
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to find numchannels property\n");
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->num_channels == 0 || xdev->num_channels >
+ MAX_ALLOWED_CHANNELS_IN_HW) {
+ dev_warn(&platform_dev->dev,
+ "Invalid xlnx-num_channels property value\n");
+ xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
+ }
+
+ xdev->channels = devm_kcalloc(&platform_dev->dev, xdev->num_channels,
+ sizeof(struct ps_pcie_dma_chan),
+ GFP_KERNEL);
+ if (!xdev->channels) {
+ err = -ENOMEM;
+ goto platform_driver_probe_return;
+ }
+
+ if (xdev->is_rootdma)
+ err = read_rootdma_config(platform_dev, xdev);
+ else
+ err = read_epdma_config(platform_dev, xdev);
+
+ if (err) {
+ dev_err(&platform_dev->dev,
+ "Unable to initialize dma configuration\n");
+ goto platform_driver_probe_return;
+ }
+
+ /* Initialize the DMA engine */
+ INIT_LIST_HEAD(&xdev->common.channels);
+
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+
+ xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ xdev->common.device_alloc_chan_resources =
+ xlnx_ps_pcie_dma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xlnx_ps_pcie_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
+ xdev->common.device_tx_status = dma_cookie_status;
+ xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
+ xdev->common.device_prep_dma_interrupt =
+ xlnx_ps_pcie_dma_prep_interrupt;
+ xdev->common.device_prep_dma_memcpy = xlnx_ps_pcie_dma_prep_memcpy;
+ xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
+ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ for (i = 0; i < xdev->num_channels; i++) {
+ err = probe_channel_properties(platform_dev, xdev, i);
+
+ if (err != 0) {
+ dev_err(xdev->dev,
+ "Unable to read channel properties\n");
+ goto platform_driver_probe_return;
+ }
+ }
+
+ if (xdev->is_rootdma)
+ err = platform_irq_setup(xdev);
+ else
+ err = irq_setup(xdev);
+ if (err) {
+ dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev,
+ "Unable to register board %d with dma framework\n",
+ xdev->board_number);
+ goto platform_driver_probe_return;
+ }
+
+ platform_set_drvdata(platform_dev, xdev);
+
+ board_number++;
+
+ dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
+ return 0;
+
+platform_driver_probe_return:
+ return err;
+}
+
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
+{
+ struct xlnx_pcie_dma_device *xdev =
+ platform_get_drvdata(platform_dev);
+ int i;
+
+ for (i = 0; i < xdev->num_channels; i++)
+ xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
+
+ dma_async_device_unregister(&xdev->common);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
+ { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
+#endif
+
+static struct platform_driver xlnx_pcie_dma_driver = {
+ .driver = {
+ .name = XLNX_PLATFORM_DRIVER_NAME,
+ .of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
+ .owner = THIS_MODULE,
+ },
+ .probe = xlnx_pcie_dma_driver_probe,
+ .remove = xlnx_pcie_dma_driver_remove,
+};
+
+int dma_platform_driver_register(void)
+{
+ return platform_driver_register(&xlnx_pcie_dma_driver);
+}
+
+void dma_platform_driver_unregister(void)
+{
+ platform_driver_unregister(&xlnx_pcie_dma_driver);
+}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9c845c07b107..643aae9679ee 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -846,6 +846,98 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
}
/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ void *desc = NULL, *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+
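+ /* Worst-case descriptor count: each src entry may be split into
+ * ZYNQMP_DMA_MAX_TRANS_LEN sized chunks.
+ */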
+ for_each_sg(src_sg, sg, src_sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+ ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ if (len == 0)
+ goto fetch;
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+ len, prev);
+ prev = desc;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
+
+/**
* zynqmp_dma_chan_remove - Channel remove function
* @chan: ZynqMP DMA channel pointer
*/
@@ -1044,9 +1136,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&zdev->common.channels);
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_cap_set(DMA_SG, zdev->common.cap_mask);
dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
p = &zdev->common;
+ p->device_prep_dma_sg = zynqmp_dma_prep_sg;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
p->device_issue_pending = zynqmp_dma_issue_pending;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5e2e0348d460..d90dbb795b12 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -334,6 +334,13 @@ config EDAC_CPC925
a companion chip to the PowerPC 970 family of
processors.
+config EDAC_PL310_L2
+ tristate "Pl310 L2 Cache Controller"
+ depends on ARM
+ help
+ Support for parity error detection on L2 cache controller
+ data and tag RAM memory.
+
config EDAC_HIGHBANK_MC
tristate "Highbank Memory Controller"
depends on ARCH_HIGHBANK
@@ -467,6 +474,13 @@ config EDAC_SYNOPSYS
Support for error detection and correction on the Synopsys DDR
memory controller.
+config EDAC_ZYNQMP_OCM
+ tristate "Xilinx ZynqMP OCM Controller"
+ depends on ARCH_ZYNQMP
+ help
+ Support for error detection and correction on the Xilinx ZynqMP OCM
+ controller.
+
config EDAC_XGENE
tristate "APM X-Gene SoC"
depends on (ARM64 || COMPILE_TEST)
@@ -474,6 +488,14 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_CORTEX_ARM64
+ tristate "ARM Cortex A57/A53"
+ default y if !CPU_IDLE
+ depends on !CPU_IDLE && ARM64
+ help
+ Support for error detection and correction on the
+ ARM Cortex A57 and A53.
+
config EDAC_TI
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 89ad4a84a0f6..c4f432e7f31b 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -79,8 +79,11 @@ obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
+obj-$(CONFIG_EDAC_PL310_L2) += pl310_edac_l2.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_CORTEX_ARM64) += cortex_arm64_edac.o
+obj-$(CONFIG_EDAC_ZYNQMP_OCM) += zynqmp_ocm_edac.o
obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
diff --git a/drivers/edac/cortex_arm64_edac.c b/drivers/edac/cortex_arm64_edac.c
new file mode 100644
index 000000000000..db89ee0c3cc3
--- /dev/null
+++ b/drivers/edac/cortex_arm64_edac.c
@@ -0,0 +1,470 @@
+/*
+ * Cortex A57 and A53 EDAC
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijeshkumar.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+#define DRV_NAME "cortex_edac"
+
+#define CPUMERRSR_EL1_INDEX(x, y) ((x) & (y))
+#define CPUMERRSR_EL1_BANK_WAY(x, y) (((x) >> 18) & (y))
+#define CPUMERRSR_EL1_RAMID(x) (((x) >> 24) & 0x7f)
+#define CPUMERRSR_EL1_VALID(x) ((x) & (1 << 31))
+#define CPUMERRSR_EL1_REPEAT(x) (((x) >> 32) & 0x7f)
+#define CPUMERRSR_EL1_OTHER(x) (((x) >> 40) & 0xff)
+#define CPUMERRSR_EL1_FATAL(x) ((x) & (1UL << 63))
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define L1_D_DIRTY_RAM 0x14
+#define TLB_RAM 0x18
+
+#define L2MERRSR_EL1_CPUID_WAY(x) (((x) >> 18) & 0xf)
+#define L2MERRSR_EL1_RAMID(x) (((x) >> 24) & 0x7f)
+#define L2MERRSR_EL1_VALID(x) ((x) & (1 << 31))
+#define L2MERRSR_EL1_REPEAT(x) (((x) >> 32) & 0xff)
+#define L2MERRSR_EL1_OTHER(x) (((x) >> 40) & 0xff)
+#define L2MERRSR_EL1_FATAL(x) ((x) & (1UL << 63))
+#define L2_TAG_RAM 0x10
+#define L2_DATA_RAM 0x11
+#define L2_SNOOP_RAM 0x12
+#define L2_DIRTY_RAM 0x14
+#define L2_INCLUSION_PF_RAM 0x18
+
+#define L1_CACHE 0
+#define L2_CACHE 1
+
+#define EDAC_MOD_STR DRV_NAME
+
+/* Error injection macros */
+#define L1_DCACHE_ERRINJ_ENABLE (1 << 6)
+#define L1_DCACHE_ERRINJ_DISABLE (~(1 << 6))
+#define L2_DCACHE_ERRINJ_ENABLE (1 << 29)
+#define L2_DCACHE_ERRINJ_DISABLE (~(1 << 29))
+#define L2_ECC_PROTECTION (1 << 22)
+
+static int poll_msec = 100;
+
+struct cortex_arm64_edac {
+ struct edac_device_ctl_info *edac_ctl;
+};
+
+static inline u64 read_cpumerrsr_el1(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, s3_1_c15_c2_2" : "=r" (val));
+ return val;
+}
+
+static inline void write_cpumerrsr_el1(u64 val)
+{
+ asm volatile("msr s3_1_c15_c2_2, %0" :: "r" (val));
+}
+
+static inline u64 read_l2merrsr_el1(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, s3_1_c15_c2_3" : "=r" (val));
+ return val;
+}
+
+static inline void write_l2merrsr_el1(u64 val)
+{
+ asm volatile("msr s3_1_c15_c2_3, %0" :: "r" (val));
+}
+
+static inline void cortexa53_edac_busy_on_inst(void)
+{
+ asm volatile("isb sy");
+}
+
+static inline void cortexa53_edac_busy_on_data(void)
+{
+ asm volatile("dsb sy");
+}
+
+static inline void write_l2actrl_el1(u64 val)
+{
+ asm volatile("msr s3_1_c15_c0_0, %0" :: "r" (val));
+ cortexa53_edac_busy_on_inst();
+}
+
+static inline u64 read_l2actrl_el1(void)
+{
+ u64 val;
+
+ asm volatile("mrs %0, s3_1_c15_c0_0" : "=r" (val));
+ return val;
+}
+
+static inline u64 read_l2ctlr_el1(void)
+{
+ u64 rval;
+
+ asm volatile("mrs %0, S3_1_C11_C0_2" : "=r" (rval));
+ return rval;
+
+}
+
+static inline u64 read_l1actrl_el1(void)
+{
+ u64 rval;
+
+ asm volatile("mrs %0, S3_1_C15_C2_0" : "=r" (rval));
+ return rval;
+}
+
+static inline void write_l1actrl_el1(u64 val)
+{
+ asm volatile("msr S3_1_C15_C2_0, %0" :: "r" (val));
+}
+
+static void parse_cpumerrsr(void *arg)
+{
+ int cpu, partnum, way;
+ unsigned int index = 0;
+ u64 val = read_cpumerrsr_el1();
+ int repeat_err, other_err;
+
+ /* we do not support fatal error handling so far */
+ if (CPUMERRSR_EL1_FATAL(val))
+ return;
+
+ /* check if we have valid error before continuing */
+ if (!CPUMERRSR_EL1_VALID(val))
+ return;
+
+ cpu = smp_processor_id();
+ partnum = read_cpuid_part_number();
+ repeat_err = CPUMERRSR_EL1_REPEAT(val);
+ other_err = CPUMERRSR_EL1_OTHER(val);
+
+ /*
+ * way/bank and index address bit ranges are different between
+ * A57 and A53
+ */
+ if (partnum == ARM_CPU_PART_CORTEX_A57) {
+ index = CPUMERRSR_EL1_INDEX(val, 0x1ffff);
+ way = CPUMERRSR_EL1_BANK_WAY(val, 0x1f);
+ } else {
+ index = CPUMERRSR_EL1_INDEX(val, 0xfff);
+ way = CPUMERRSR_EL1_BANK_WAY(val, 0x7);
+ }
+
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "CPU%d L1 error detected!\n", cpu);
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "index=%#x, RAMID=", index);
+
+ switch (CPUMERRSR_EL1_RAMID(val)) {
+ case L1_I_TAG_RAM:
+ pr_cont("'L1-I Tag RAM' (way %d)", way);
+ break;
+ case L1_I_DATA_RAM:
+ pr_cont("'L1-I Data RAM' (bank %d)", way);
+ break;
+ case L1_D_TAG_RAM:
+ pr_cont("'L1-D Tag RAM' (way %d)", way);
+ break;
+ case L1_D_DATA_RAM:
+ pr_cont("'L1-D Data RAM' (bank %d)", way);
+ break;
+ case L1_D_DIRTY_RAM:
+ pr_cont("'L1 Dirty RAM'");
+ break;
+ case TLB_RAM:
+ pr_cont("'TLB RAM'");
+ break;
+ default:
+ pr_cont("'unknown'");
+ break;
+ }
+
+ pr_cont(", repeat=%d, other=%d (CPUMERRSR_EL1=%#llx)\n", repeat_err,
+ other_err, val);
+
+ trace_mc_event(HW_EVENT_ERR_CORRECTED, "L1 non-fatal error",
+ "", repeat_err, 0, 0, 0, -1, index, 0, 0, DRV_NAME);
+ write_cpumerrsr_el1(0);
+}
+
+static void a57_parse_l2merrsr_way(u8 ramid, u8 val)
+{
+ switch (ramid) {
+ case L2_TAG_RAM:
+ case L2_DATA_RAM:
+ case L2_DIRTY_RAM:
+ pr_cont("(cpu%d tag, way %d)", val / 2, val % 2);
+ break;
+ case L2_SNOOP_RAM:
+ pr_cont("(cpu%d tag, way %d)", (val & 0x6) >> 1,
+ (val & 0x1));
+ break;
+ }
+}
+
+static void a53_parse_l2merrsr_way(u8 ramid, u8 val)
+{
+ switch (ramid) {
+ case L2_TAG_RAM:
+ pr_cont("(way %d)", val);
+ case L2_DATA_RAM:
+ pr_cont("(bank %d)", val);
+ break;
+ case L2_SNOOP_RAM:
+ pr_cont("(cpu%d tag, way %d)", val / 2, val % 4);
+ break;
+ }
+}
+
+static void parse_l2merrsr(void *arg)
+{
+ int cpu, partnum;
+ unsigned int index;
+ int repeat_err, other_err;
+ u64 val = read_l2merrsr_el1();
+
+ /* we do not support fatal error handling so far */
+ if (L2MERRSR_EL1_FATAL(val))
+ return;
+
+ /* check if we have valid error before continuing */
+ if (!L2MERRSR_EL1_VALID(val))
+ return;
+
+ cpu = smp_processor_id();
+ partnum = read_cpuid_part_number();
+ repeat_err = L2MERRSR_EL1_REPEAT(val);
+ other_err = L2MERRSR_EL1_OTHER(val);
+
+ /* index address range is different between A57 and A53 */
+ if (partnum == ARM_CPU_PART_CORTEX_A57)
+ index = val & 0x1ffff;
+ else
+ index = (val >> 3) & 0x3fff;
+
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "CPU%d L2 error detected!\n", cpu);
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "index=%#x RAMID=", index);
+
+ switch (L2MERRSR_EL1_RAMID(val)) {
+ case L2_TAG_RAM:
+ pr_cont("'L2 Tag RAM'");
+ break;
+ case L2_DATA_RAM:
+ pr_cont("'L2 Data RAM'");
+ break;
+ case L2_SNOOP_RAM:
+ pr_cont("'L2 Snoop tag RAM'");
+ break;
+ case L2_DIRTY_RAM:
+ pr_cont("'L2 Dirty RAM'");
+ break;
+ case L2_INCLUSION_PF_RAM:
+ pr_cont("'L2 inclusion PF RAM'");
+ break;
+ default:
+ pr_cont("unknown");
+ break;
+ }
+
+ /* cpuid/way bit description is different between A57 and A53 */
+ if (partnum == ARM_CPU_PART_CORTEX_A57)
+ a57_parse_l2merrsr_way(L2MERRSR_EL1_RAMID(val),
+ L2MERRSR_EL1_CPUID_WAY(val));
+ else
+ a53_parse_l2merrsr_way(L2MERRSR_EL1_RAMID(val),
+ L2MERRSR_EL1_CPUID_WAY(val));
+
+ pr_cont(", repeat=%d, other=%d (L2MERRSR_EL1=%#llx)\n", repeat_err,
+ other_err, val);
+ trace_mc_event(HW_EVENT_ERR_CORRECTED, "L2 non-fatal error",
+ "", repeat_err, 0, 0, 0, -1, index, 0, 0, DRV_NAME);
+ write_l2merrsr_el1(0);
+}
+
+static void cortex_arm64_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ int cpu;
+ struct cpumask cluster_mask, old_mask;
+
+ cpumask_clear(&cluster_mask);
+ cpumask_clear(&old_mask);
+
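+ /*
+ * CPUMERRSR_EL1 is per-CPU, so check it on every online CPU;
+ * L2MERRSR_EL1 is shared within a cluster, so check it on only
+ * one CPU per cluster by comparing core sibling masks.
+ */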
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ /* Check CPU L1 error */
+ smp_call_function_single(cpu, parse_cpumerrsr, NULL, 0);
+ cpumask_copy(&cluster_mask, topology_core_cpumask(cpu));
+ if (cpumask_equal(&cluster_mask, &old_mask))
+ continue;
+ cpumask_copy(&old_mask, &cluster_mask);
+ /* Check CPU L2 error */
+ smp_call_function_any(&cluster_mask, parse_l2merrsr, NULL, 0);
+ }
+ put_online_cpus();
+}
+
+static ssize_t cortexa53_edac_inject_L2_show(struct edac_device_ctl_info
+ *dci, char *data)
+{
+ return sprintf(data, "L2ACTLR_EL1: [0x%llx]\n\r", read_l2actrl_el1());
+}
+
+static ssize_t cortexa53_edac_inject_L2_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ u64 l2actrl, l2ecc;
+
+ if (!data)
+ return -EFAULT;
+
+ l2ecc = read_l2ctlr_el1();
+ if ((l2ecc & L2_ECC_PROTECTION)) {
+ l2actrl = read_l2actrl_el1();
+ l2actrl = l2actrl | L2_DCACHE_ERRINJ_ENABLE;
+ write_l2actrl_el1(l2actrl);
+ cortexa53_edac_busy_on_inst();
+ } else {
+ edac_printk(KERN_CRIT, EDAC_MOD_STR, "L2 ECC not enabled\n");
+ }
+
+ return count;
+}
+
+static ssize_t cortexa53_edac_inject_L1_show(struct edac_device_ctl_info
+ *dci, char *data)
+{
+ return sprintf(data, "L1CTLR_EL1: [0x%llx]\n\r", read_l1actrl_el1());
+}
+
+static ssize_t cortexa53_edac_inject_L1_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ u64 l1actrl;
+
+ if (!data)
+ return -EFAULT;
+
+ l1actrl = read_l1actrl_el1();
+ l1actrl |= L1_DCACHE_ERRINJ_ENABLE;
+ write_l1actrl_el1(l1actrl);
+ cortexa53_edac_busy_on_inst();
+
+ return count;
+}
+
+static struct edac_dev_sysfs_attribute cortexa53_edac_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_L2_Cache_Error",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = cortexa53_edac_inject_L2_show,
+ .store = cortexa53_edac_inject_L2_store},
+ {
+ .attr = {
+ .name = "inject_L1_Cache_Error",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = cortexa53_edac_inject_L1_show,
+ .store = cortexa53_edac_inject_L1_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void cortexa53_set_edac_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = cortexa53_edac_sysfs_attributes;
+}
+
+static int cortex_arm64_edac_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct cortex_arm64_edac *drv;
+ struct device *dev = &pdev->dev;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ /* Only POLL mode is supported */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ drv->edac_ctl = edac_device_alloc_ctl_info(0, "cpu_cache", 1, "L", 2,
+ 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!drv->edac_ctl)
+ return -ENOMEM;
+
+ drv->edac_ctl->poll_msec = poll_msec;
+ drv->edac_ctl->edac_check = cortex_arm64_edac_check;
+ drv->edac_ctl->dev = dev;
+ drv->edac_ctl->mod_name = dev_name(dev);
+ drv->edac_ctl->dev_name = dev_name(dev);
+ drv->edac_ctl->ctl_name = "cache_err";
+ platform_set_drvdata(pdev, drv);
+
+ cortexa53_set_edac_sysfs_attributes(drv->edac_ctl);
+
+ rc = edac_device_add_device(drv->edac_ctl);
+ if (rc)
+ edac_device_free_ctl_info(drv->edac_ctl);
+
+ return rc;
+}
+
+static int cortex_arm64_edac_remove(struct platform_device *pdev)
+{
+ struct cortex_arm64_edac *drv = dev_get_drvdata(&pdev->dev);
+ struct edac_device_ctl_info *edac_ctl = drv->edac_ctl;
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+
+ return 0;
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a57-edac" },
+ { .compatible = "arm,cortex-a53-edac" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver cortex_arm64_edac_driver = {
+ .probe = cortex_arm64_edac_probe,
+ .remove = cortex_arm64_edac_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = cortex_arm64_edac_of_match,
+ },
+};
+module_platform_driver(cortex_arm64_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brijesh Singh <brijeshkumar.singh@amd.com>");
+MODULE_DESCRIPTION("Cortex A57 and A53 EDAC driver");
+module_param(poll_msec, int, 0444);
+MODULE_PARM_DESC(poll_msec, "EDAC monitor poll interval in msec");
diff --git a/drivers/edac/pl310_edac_l2.c b/drivers/edac/pl310_edac_l2.c
new file mode 100644
index 000000000000..57f2f5b022d8
--- /dev/null
+++ b/drivers/edac/pl310_edac_l2.c
@@ -0,0 +1,233 @@
+/*
+ * PL310 L2 Cache EDAC Driver
+ *
+ * Copyright (C) 2013-2014 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/hardware/cache-l2x0.h>
+#include "edac_module.h"
+
+/* Auxiliary control register definitions */
+#define L2X0_AUX_CTRL_PARITY_MASK BIT(21)
+
+/* Interrupt imask/status/clear register definitions */
+#define L2X0_INTR_PARRD_MASK 0x4
+#define L2X0_INTR_PARRT_MASK 0x2
+
+/**
+ * struct pl310_edac_l2_priv - Zynq L2 cache controller private instance data
+ * @base: Base address of the controller
+ * @irq: Interrupt number
+ */
+struct pl310_edac_l2_priv {
+ void __iomem *base;
+ int irq;
+};
+
+/**
+ * pl310_edac_l2_parityerr_check - Check controller status for parity errors
+ * @dci: Pointer to the edac device controller instance
+ *
+ * This routine is used to check and post parity errors
+ */
+static void pl310_edac_l2_parityerr_check(struct edac_device_ctl_info *dci)
+{
+ struct pl310_edac_l2_priv *priv = dci->pvt_info;
+ u32 regval;
+
+ regval = readl(priv->base + L2X0_RAW_INTR_STAT);
+ if (regval & L2X0_INTR_PARRD_MASK) {
+ /* Data parity error will be reported as correctable error */
+ writel(L2X0_INTR_PARRD_MASK, priv->base + L2X0_INTR_CLEAR);
+ edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
+ }
+ if (regval & L2X0_INTR_PARRT_MASK) {
+ /* tag parity error will be reported as uncorrectable error */
+ writel(L2X0_INTR_PARRT_MASK, priv->base + L2X0_INTR_CLEAR);
+ edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
+ }
+}
+
+/**
+ * pl310_edac_l2_int_handler - ISR function for the L2 cache controller
+ * @irq: Irq Number
+ * @device: Pointer to the edac device controller instance
+ *
+ * This routine is triggered whenever a parity error is detected
+ *
+ * Return: Always returns IRQ_HANDLED
+ */
+static irqreturn_t pl310_edac_l2_int_handler(int irq, void *device)
+{
+ pl310_edac_l2_parityerr_check((struct edac_device_ctl_info *)device);
+ return IRQ_HANDLED;
+}
+
+/**
+ * pl310_edac_l2_poll_handler - Poll the status reg for parity errors
+ * @dci: Pointer to the edac device controller instance
+ *
+ * This routine is used to check and post parity errors and is called by
+ * the EDAC polling thread
+ */
+static void pl310_edac_l2_poll_handler(struct edac_device_ctl_info *dci)
+{
+ pl310_edac_l2_parityerr_check(dci);
+}
+
+/**
+ * pl310_edac_l2_get_paritystate - check the parity enable/disable status
+ * @base: Pointer to the controller base address
+ *
+ * This routine returns the parity enable/disable status for the controller
+ *
+ * Return: true/false - parity enabled/disabled.
+ */
+static bool pl310_edac_l2_get_paritystate(void __iomem *base)
+{
+ u32 regval;
+
+ regval = readl(base + L2X0_AUX_CTRL);
+ if (regval & L2X0_AUX_CTRL_PARITY_MASK)
+ return true;
+
+ return false;
+}
+
+/**
+ * pl310_edac_l2_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * This routine probes a specific arm,pl310-cache instance for binding
+ * with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int pl310_edac_l2_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct pl310_edac_l2_priv *priv;
+ int rc;
+ struct resource *res;
+ void __iomem *baseaddr;
+ u32 regval;
+
+ /* Get the data from the platform device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ /* Check for the ecc enable status */
+ if (pl310_edac_l2_get_paritystate(baseaddr) == false) {
+ dev_err(&pdev->dev, "parity check not enabled\n");
+ return -ENXIO;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*priv), "l2cache",
+ 1, "L", 1, 1, NULL, 0,
+ edac_device_alloc_index());
+ if (!dci)
+ return -ENOMEM;
+
+ priv = dci->pvt_info;
+ priv->base = baseaddr;
+ dci->dev = &pdev->dev;
+ dci->mod_name = "pl310_edac_l2";
+ dci->ctl_name = "pl310_l2_controller";
+ dci->dev_name = dev_name(&pdev->dev);
+ platform_set_drvdata(pdev, dci);
+
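+ /* Prefer interrupt mode; fall back to EDAC polling if the
+ * interrupt line cannot be requested.
+ */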
+ priv->irq = platform_get_irq(pdev, 0);
+ rc = devm_request_irq(&pdev->dev, priv->irq,
+ pl310_edac_l2_int_handler,
+ 0, dev_name(&pdev->dev), (void *)dci);
+ if (rc < 0) {
+ dci->edac_check = pl310_edac_l2_poll_handler;
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ rc = edac_device_add_device(dci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ goto free_edac_device;
+ }
+
+ if (edac_op_state != EDAC_OPSTATE_POLL) {
+ regval = readl(priv->base+L2X0_INTR_MASK);
+ regval |= (L2X0_INTR_PARRD_MASK | L2X0_INTR_PARRT_MASK);
+ writel(regval, priv->base+L2X0_INTR_MASK);
+ }
+
+ return rc;
+
+free_edac_device:
+ edac_device_free_ctl_info(dci);
+
+ return rc;
+}
+
+/**
+ * pl310_edac_l2_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * This routine unbinds the EDAC device controller instance associated
+ * with the specified arm,pl310-cache controller described by the
+ * OpenFirmware device tree node passed as a parameter.
+ *
+ * Return: Always returns 0
+ */
+static int pl310_edac_l2_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct pl310_edac_l2_priv *priv = dci->pvt_info;
+ u32 regval;
+
+ if (edac_op_state != EDAC_OPSTATE_POLL) {
+ regval = readl(priv->base+L2X0_INTR_MASK);
+ regval &= ~(L2X0_INTR_PARRD_MASK | L2X0_INTR_PARRT_MASK);
+ writel(regval, priv->base+L2X0_INTR_MASK);
+ }
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+/* Device tree node type and compatible tuples this driver can match on */
+static const struct of_device_id pl310_edac_l2_match[] = {
+ { .compatible = "arm,pl310-cache", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, pl310_edac_l2_match);
+
+static struct platform_driver pl310_edac_l2_driver = {
+ .driver = {
+ .name = "pl310-edac-l2",
+ .of_match_table = pl310_edac_l2_match,
+ },
+ .probe = pl310_edac_l2_probe,
+ .remove = pl310_edac_l2_remove,
+};
+
+module_platform_driver(pl310_edac_l2_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("pl310 L2 EDAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/edac/zynqmp_ocm_edac.c b/drivers/edac/zynqmp_ocm_edac.c
new file mode 100644
index 000000000000..4957a8c9d02d
--- /dev/null
+++ b/drivers/edac/zynqmp_ocm_edac.c
@@ -0,0 +1,651 @@
+/*
+ * Xilinx ZynqMP OCM ECC Driver
+ * This driver is based on mpc85xx_edac.c drivers
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details
+ */
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "edac_module.h"
+
+#define ZYNQMP_OCM_EDAC_MSG_SIZE 256
+
+#define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm"
+#define ZYNQMP_OCM_EDAC_MOD_VER "1"
+
+/* Controller registers */
+#define CTRL_OFST 0x0
+#define OCM_ISR_OFST 0x04
+#define OCM_IMR_OFST 0x08
+#define OCM_IEN_OFST 0x0C
+#define OCM_IDS_OFST 0x10
+
+/* ECC control register */
+#define ECC_CTRL_OFST 0x14
+
+/* Correctable error info registers */
+#define CE_FFA_OFST 0x1C
+#define CE_FFD0_OFST 0x20
+#define CE_FFD1_OFST 0x24
+#define CE_FFD2_OFST 0x28
+#define CE_FFD3_OFST 0x2C
+#define CE_FFE_OFST 0x30
+
+/* Uncorrectable error info registers */
+#define UE_FFA_OFST 0x34
+#define UE_FFD0_OFST 0x38
+#define UE_FFD1_OFST 0x3C
+#define UE_FFD2_OFST 0x40
+#define UE_FFD3_OFST 0x44
+#define UE_FFE_OFST 0x48
+
+/* ECC control register bit field definitions */
+#define ECC_CTRL_CLR_CE_ERR 0x40
+#define ECC_CTRL_CLR_UE_ERR 0x80
+
+/* Fault injection data and count registers */
+#define OCM_FID0_OFST 0x4C
+#define OCM_FID1_OFST 0x50
+#define OCM_FID2_OFST 0x54
+#define OCM_FID3_OFST 0x58
+#define OCM_FIC_OFST 0x74
+
+/* Interrupt masks */
+#define OCM_CEINTR_MASK 0x40
+#define OCM_UEINTR_MASK 0x80
+#define OCM_ECC_ENABLE_MASK 0x1
+#define OCM_FICOUNT_MASK 0x0FFFFFFF
+#define OCM_BASEVAL 0xFFFC0000
+#define EDAC_DEVICE "ZynqMP-OCM"
+#define OCM_CEUE_MASK 0xC0
+
+/**
+ * struct ecc_error_info - ECC error log information
+ * @addr: Fault generated at this address
+ * @data0: Generated fault data
+ * @data1: Generated fault data
+ */
+struct ecc_error_info {
+ u32 addr;
+ u32 data0;
+ u32 data1;
+};
+
+/**
+ * struct zynqmp_ocm_ecc_status - ECC status information to report
+ * @ce_cnt: Correctable error count
+ * @ue_cnt: Uncorrectable error count
+ * @ceinfo: Correctable error log information
+ * @ueinfo: Uncorrectable error log information
+ */
+struct zynqmp_ocm_ecc_status {
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_error_info ceinfo;
+ struct ecc_error_info ueinfo;
+};
+
+/**
+ * struct zynqmp_ocm_edac_priv - OCM controller private instance data
+ * @baseaddr:	Base address of the OCM controller
+ * @message: Buffer for framing the event specific info
+ * @stat: ECC status information
+ * @p_data: Pointer to platform data
+ * @ce_cnt: Correctable Error count
+ * @ue_cnt: Uncorrectable Error count
+ * @ce_bitpos: Bit position for Correctable Error
+ * @ue_bitpos0: First bit position for Uncorrectable Error
+ * @ue_bitpos1: Second bit position for Uncorrectable Error
+ */
+struct zynqmp_ocm_edac_priv {
+ void __iomem *baseaddr;
+ char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
+ struct zynqmp_ocm_ecc_status stat;
+ const struct zynqmp_ocm_platform_data *p_data;
+ u32 ce_cnt;
+ u32 ue_cnt;
+ u8 ce_bitpos;
+ u8 ue_bitpos0;
+ u8 ue_bitpos1;
+};
+
+/**
+ * zynqmp_ocm_edac_geterror_info - Get the current ECC error info
+ * @base:	Pointer to the base address of the OCM controller
+ * @p:	Pointer to the OCM ECC status structure
+ * @mask:	Status register mask value
+ *
+ * Determines whether an ECC error occurred, logs the fault address and
+ * data, and clears the corresponding interrupt status.
+ *
+ */
+static void zynqmp_ocm_edac_geterror_info(void __iomem *base,
+ struct zynqmp_ocm_ecc_status *p, int mask)
+{
+ if (mask & OCM_CEINTR_MASK) {
+ p->ce_cnt++;
+ p->ceinfo.data0 = readl(base + CE_FFD0_OFST);
+ p->ceinfo.data1 = readl(base + CE_FFD1_OFST);
+ p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
+ writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
+ } else if (mask & OCM_UEINTR_MASK) {
+ p->ue_cnt++;
+ p->ueinfo.data0 = readl(base + UE_FFD0_OFST);
+ p->ueinfo.data1 = readl(base + UE_FFD1_OFST);
+ p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
+ writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
+ }
+}
+
+/**
+ * zynqmp_ocm_edac_handle_error - Handle controller error types CE and UE
+ * @dci: Pointer to the edac device controller instance
+ * @p: Pointer to the ocm ecc status structure
+ *
+ * Handles the controller ECC correctable and uncorrectable errors.
+ */
+static void zynqmp_ocm_edac_handle_error(struct edac_device_ctl_info *dci,
+ struct zynqmp_ocm_ecc_status *p)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ struct ecc_error_info *pinf;
+
+ if (p->ce_cnt) {
+ pinf = &p->ceinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\n\rOCM ECC error type :%s\n\r"
+ "Addr: [0x%X]\n\rFault Data[31:0]: [0x%X]\n\r"
+ "Fault Data[63:32]: [0x%X]",
+ "CE", pinf->addr, pinf->data0, pinf->data1);
+ edac_device_handle_ce(dci, 0, 0, priv->message);
+ }
+
+ if (p->ue_cnt) {
+ pinf = &p->ueinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\n\rOCM ECC error type :%s\n\r"
+ "Addr: [0x%X]\n\rFault Data[31:0]: [0x%X]\n\r"
+ "Fault Data[63:32]: [0x%X]",
+ "UE", pinf->addr, pinf->data0, pinf->data1);
+ edac_device_handle_ue(dci, 0, 0, priv->message);
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * zynqmp_ocm_edac_intr_handler - ISR routine
+ * @irq:	IRQ number
+ * @dev_id:	Device ID pointer
+ *
+ * This is the ISR routine called by the EDAC core interrupt thread.
+ * It checks for and reports ECC errors.
+ *
+ * Return: IRQ_NONE if the interrupt was not raised by this controller,
+ * IRQ_HANDLED otherwise
+ */
+static irqreturn_t zynqmp_ocm_edac_intr_handler(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *dci = dev_id;
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ int regval;
+
+ regval = readl(priv->baseaddr + OCM_ISR_OFST);
+ if (!(regval & OCM_CEUE_MASK))
+ return IRQ_NONE;
+
+ zynqmp_ocm_edac_geterror_info(priv->baseaddr,
+ &priv->stat, regval);
+
+ priv->ce_cnt += priv->stat.ce_cnt;
+ priv->ue_cnt += priv->stat.ue_cnt;
+ zynqmp_ocm_edac_handle_error(dci, &priv->stat);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * zynqmp_ocm_edac_get_eccstate - Return the controller ECC status
+ * @base:	Pointer to the OCM controller base address
+ *
+ * Get the ECC enable/disable status for the controller.
+ *
+ * Return: true if ECC is enabled, false otherwise.
+ */
+static bool zynqmp_ocm_edac_get_eccstate(void __iomem *base)
+{
+ return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
+}
+
+static const struct of_device_id zynqmp_ocm_edac_match[] = {
+ { .compatible = "xlnx,zynqmp-ocmc-1.0"},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
+
+/**
+ * zynqmp_ocm_edac_inject_fault_count_show - Shows fault injection count
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ *
+ * Shows the fault injection count; once the counter reaches
+ * zero, errors are injected.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_fault_count_show(
+ struct edac_device_ctl_info *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ return sprintf(data, "FIC: 0x%x\n\r",
+ readl(priv->baseaddr + OCM_FIC_OFST));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_fault_count_store - Set the fault injection count
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	Number of bytes in the buffer
+ *
+ * Updates the fault injection count register; once the counter reaches
+ * zero, errors are injected.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_fault_count_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ u32 ficount;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtouint(data, 0, &ficount))
+ return -EINVAL;
+
+ ficount &= OCM_FICOUNT_MASK;
+ writel(ficount, priv->baseaddr + OCM_FIC_OFST);
+
+ return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_cebitpos_show - Shows CE bit position
+ * @dci: Pointer to the edac device struct
+ * @data: Pointer to user data
+ *
+ * Shows the correctable error bit position.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_cebitpos_show(struct edac_device_ctl_info
+ *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (priv->ce_bitpos <= 31)
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID0_OFST))));
+
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID1_OFST))));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_cebitpos_store - Set CE bit position
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	Number of bytes in the buffer
+ *
+ * Sets a single bit position to inject a CE error.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_cebitpos_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtou8(data, 0, &priv->ce_bitpos))
+ return -EINVAL;
+
+ if (priv->ce_bitpos <= 31) {
+ writel(1 << priv->ce_bitpos, priv->baseaddr + OCM_FID0_OFST);
+ writel(0, priv->baseaddr + OCM_FID1_OFST);
+ } else if (priv->ce_bitpos >= 32 && priv->ce_bitpos <= 63) {
+ writel(1 << (priv->ce_bitpos - 32),
+ priv->baseaddr + OCM_FID1_OFST);
+ writel(0, priv->baseaddr + OCM_FID0_OFST);
+ } else {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit position > 63 is not valid\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos0_show - Shows UE bit position 0
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ *
+ * Shows one of the bit positions configured for UE error injection.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos0_show(
+ struct edac_device_ctl_info *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (priv->ue_bitpos0 <= 31)
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID0_OFST))));
+
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID1_OFST))));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos0_store - Set UE bit position 0
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	Number of bytes in the buffer
+ *
+ * Sets the first bit position for UE error generation; two distinct bit
+ * positions must be configured to inject a UE error.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos0_store(
+ struct edac_device_ctl_info *dci,
+ const char *data, size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtou8(data, 0, &priv->ue_bitpos0))
+ return -EINVAL;
+
+ if (priv->ue_bitpos0 <= 31) {
+ writel(1 << priv->ue_bitpos0, priv->baseaddr + OCM_FID0_OFST);
+ } else if (priv->ue_bitpos0 >= 32 && priv->ue_bitpos0 <= 63) {
+ writel(1 << (priv->ue_bitpos0 - 32),
+ priv->baseaddr + OCM_FID1_OFST);
+ } else {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit position > 63 is not valid\n");
+ return -EINVAL;
+ }
+
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "Set another bit position for UE\n");
+ return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos1_show - Shows UE bit position 1
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ *
+ * Shows the second bit position configured for UE error injection.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos1_show(
+ struct edac_device_ctl_info *dci, char *data)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ if (priv->ue_bitpos1 <= 31)
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID0_OFST))));
+
+ return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+ ((readl(priv->baseaddr + OCM_FID1_OFST))));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos1_store - Set UE bit position 1
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	Number of bytes in the buffer
+ *
+ * Sets the second bit position for UE error generation; two distinct bit
+ * positions must be configured to inject a UE error.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos1_store(
+ struct edac_device_ctl_info *dci, const char *data,
+ size_t count)
+{
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+ u32 mask;
+
+ if (!data)
+ return -EFAULT;
+
+ if (kstrtou8(data, 0, &priv->ue_bitpos1))
+ return -EINVAL;
+
+ if (priv->ue_bitpos0 == priv->ue_bitpos1) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit positions should not be equal\n");
+ return -EINVAL;
+ }
+
+ /* If both bit positions fall in the lower word (bits 0-31), only the
+ * FID0 register is configured; if both fall in the upper word
+ * (bits 32-63), only FID1. When the two positions fall in different
+ * words, the register for uebitpos0 was already set up in
+ * zynqmp_ocm_edac_inject_uebitpos0_store(), so only the register for
+ * uebitpos1 is updated here.
+ */
+ if (priv->ue_bitpos0 <= 31 && priv->ue_bitpos1 <= 31) {
+ mask = (1 << priv->ue_bitpos0);
+ mask |= (1 << priv->ue_bitpos1);
+ writel(mask, priv->baseaddr + OCM_FID0_OFST);
+ writel(0, priv->baseaddr + OCM_FID1_OFST);
+ } else if ((priv->ue_bitpos0 >= 32 && priv->ue_bitpos0 <= 63) &&
+ (priv->ue_bitpos1 >= 32 && priv->ue_bitpos1 <= 63)) {
+ mask = (1 << (priv->ue_bitpos0 - 32));
+ mask |= (1 << (priv->ue_bitpos1 - 32));
+ writel(mask, priv->baseaddr + OCM_FID1_OFST);
+ writel(0, priv->baseaddr + OCM_FID0_OFST);
+ } else if (priv->ue_bitpos0 <= 31 &&
+ (priv->ue_bitpos1 >= 32 && priv->ue_bitpos1 <= 63)) {
+ writel(1 << (priv->ue_bitpos1 - 32),
+ priv->baseaddr + OCM_FID1_OFST);
+ } else if ((priv->ue_bitpos0 >= 32 && priv->ue_bitpos0 <= 63) &&
+ priv->ue_bitpos1 <= 31) {
+ writel(1 << priv->ue_bitpos1,
+ priv->baseaddr + OCM_FID0_OFST);
+ } else {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Bit position > 63 is not valid, Valid bits:[63:0]\n");
+ return -EINVAL;
+ }
+
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "UE at Bit Position0: %d Bit Position1: %d\n",
+ priv->ue_bitpos0, priv->ue_bitpos1);
+ return count;
+}
+
+static struct edac_dev_sysfs_attribute zynqmp_ocm_edac_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_cebitpos",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_cebitpos_show,
+ .store = zynqmp_ocm_edac_inject_cebitpos_store},
+ {
+ .attr = {
+ .name = "inject_uebitpos0",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_uebitpos0_show,
+ .store = zynqmp_ocm_edac_inject_uebitpos0_store},
+ {
+ .attr = {
+ .name = "inject_uebitpos1",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_uebitpos1_show,
+ .store = zynqmp_ocm_edac_inject_uebitpos1_store},
+ {
+ .attr = {
+ .name = "inject_fault_count",
+ .mode = (0644)
+ },
+ .show = zynqmp_ocm_edac_inject_fault_count_show,
+ .store = zynqmp_ocm_edac_inject_fault_count_store},
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
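+
+/*
+ * Example fault-injection sequence, a sketch only: the sysfs directory is
+ * created by the EDAC core from the "zynqmp_ocm" control name, so the path
+ * shown here is an assumption and may differ on a given system:
+ *   echo 7 > /sys/devices/system/edac/zynqmp_ocm/inject_cebitpos
+ *   echo 1 > /sys/devices/system/edac/zynqmp_ocm/inject_fault_count
+ * For a UE, write two distinct positions to inject_uebitpos0 and
+ * inject_uebitpos1 before setting inject_fault_count.
+ */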
+
+/**
+ * zynqmp_set_ocm_edac_sysfs_attributes - create sysfs attributes
+ * @edac_dev: Pointer to the edac device struct
+ *
+ * Creates sysfs entires for error injection
+ * Return: None.
+ */
+static void zynqmp_set_ocm_edac_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = zynqmp_ocm_edac_sysfs_attributes;
+}
+
+/**
+ * zynqmp_ocm_edac_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Probes a specific controller instance for binding with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int zynqmp_ocm_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct zynqmp_ocm_edac_priv *priv;
+ int irq, status;
+ struct resource *res;
+ void __iomem *baseaddr;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ if (!zynqmp_ocm_edac_get_eccstate(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "ECC not enabled - Disabling EDAC driver\n");
+ return -ENXIO;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
+ 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to allocate EDAC device\n");
+ return -ENOMEM;
+ }
+
+ priv = dci->pvt_info;
+ platform_set_drvdata(pdev, dci);
+ dci->dev = &pdev->dev;
+ priv->baseaddr = baseaddr;
+ dci->mod_name = pdev->dev.driver->name;
+ dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
+ dci->dev_name = dev_name(&pdev->dev);
+
+ zynqmp_set_ocm_edac_sysfs_attributes(dci);
+ if (edac_device_add_device(dci))
+ goto free_dev_ctl;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "No IRQ found in DT\n");
+ goto free_edac_dev;
+ }
+
+ status = devm_request_irq(&pdev->dev, irq,
+ zynqmp_ocm_edac_intr_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (status < 0) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
+ goto free_edac_dev;
+ }
+
+ writel(OCM_CEUE_MASK, priv->baseaddr + OCM_IEN_OFST);
+
+ return 0;
+
+free_edac_dev:
+ edac_device_del_device(&pdev->dev);
+free_dev_ctl:
+ edac_device_free_ctl_info(dci);
+
+ return -1;
+}
+
+/**
+ * zynqmp_ocm_edac_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * Return: Unconditionally 0
+ */
+static int zynqmp_ocm_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+ writel(OCM_CEUE_MASK, priv->baseaddr + OCM_IDS_OFST);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_ocm_edac_driver = {
+ .driver = {
+ .name = "zynqmp-ocm-edac",
+ .of_match_table = zynqmp_ocm_edac_match,
+ },
+ .probe = zynqmp_ocm_edac_probe,
+ .remove = zynqmp_ocm_edac_remove,
+};
+
+module_platform_driver(zynqmp_ocm_edac_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("ZynqMP OCM ECC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index bd33bbf70daf..0860f29a8ec8 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -21,4 +21,10 @@ config ZYNQMP_FIRMWARE_DEBUG
Say yes to enable ZynqMP firmware interface debug APIs.
If in doubt, say N.
+config ZYNQMP_FIRMWARE_SECURE
+ bool "Enable Xilinx Zynq MPSoC secure firmware loading APIs"
+ help
+ Say yes to enable ZynqMP secure firmware loading APIs.
+ If in doubt, say N.
+
endmenu
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..1b57bb14ad94 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ggs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE_SECURE) += zynqmp-secure.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index c6d0724da4db..1ba7bfa50590 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -31,13 +31,88 @@ static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_REQUEST_SUSPEND),
+ PM_API(PM_SELF_SUSPEND),
+ PM_API(PM_FORCE_POWERDOWN),
+ PM_API(PM_ABORT_SUSPEND),
+ PM_API(PM_REQUEST_WAKEUP),
+ PM_API(PM_SET_WAKEUP_SOURCE),
+ PM_API(PM_SYSTEM_SHUTDOWN),
+ PM_API(PM_REQUEST_NODE),
+ PM_API(PM_RELEASE_NODE),
+ PM_API(PM_SET_REQUIREMENT),
+ PM_API(PM_SET_MAX_LATENCY),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_SET_CONFIGURATION),
+ PM_API(PM_GET_NODE_STATUS),
+ PM_API(PM_GET_OPERATING_CHARACTERISTIC),
+ PM_API(PM_REGISTER_NOTIFIER),
+ PM_API(PM_RESET_ASSERT),
+ PM_API(PM_RESET_GET_STATUS),
+ PM_API(PM_GET_CHIPID),
+ PM_API(PM_PINCTRL_GET_FUNCTION),
+ PM_API(PM_PINCTRL_SET_FUNCTION),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_GET),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_SET),
+ PM_API(PM_IOCTL),
+ PM_API(PM_CLOCK_ENABLE),
+ PM_API(PM_CLOCK_DISABLE),
+ PM_API(PM_CLOCK_GETSTATE),
+ PM_API(PM_CLOCK_SETDIVIDER),
+ PM_API(PM_CLOCK_GETDIVIDER),
+ PM_API(PM_CLOCK_SETRATE),
+ PM_API(PM_CLOCK_GETRATE),
+ PM_API(PM_CLOCK_SETPARENT),
+ PM_API(PM_CLOCK_GETPARENT),
PM_API(PM_QUERY_DATA),
};
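+
+/*
+ * Each entry above is matched against the first token written to the PM
+ * debugfs node (a sketch; the "zynqmp-firmware/pm" path is assumed from
+ * the debugfs directory created by this driver, arguments are API
+ * specific):
+ *   echo "PM_GET_CHIPID" > /sys/kernel/debug/zynqmp-firmware/pm
+ *   cat /sys/kernel/debug/zynqmp-firmware/pm
+ */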
struct dentry *firmware_debugfs_root;
/**
+ * zynqmp_pm_self_suspend - PM call for master to suspend itself
+ * @node: Node ID of the master or subsystem
+ * @latency: Requested maximum wakeup latency (not supported)
+ * @state: Requested state (not supported)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_self_suspend(const u32 node, const u32 latency,
+ const u32 state)
+{
+ return zynqmp_pm_invoke_fn(PM_SELF_SUSPEND, node, latency,
+ state, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_abort_suspend - PM call to announce that a prior suspend request
+ * is to be aborted.
+ * @reason: Reason for the abort
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_abort_suspend(const enum zynqmp_pm_abort_reason reason)
+{
+ return zynqmp_pm_invoke_fn(PM_ABORT_SUSPEND, reason, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_register_notifier - Register the PU to be notified of PM events
+ * @node: Node ID of the slave
+ * @event: The event to be notified about
+ * @wake: Wake up on event
+ * @enable: Enable or disable the notifier
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+ wake, enable, NULL);
+}
+
+/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
@@ -87,6 +162,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
+ u64 rate;
int ret;
struct zynqmp_pm_query_data qdata = {0};
@@ -96,6 +172,190 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
+ case PM_REQUEST_SUSPEND:
+ ret = eemi_ops->request_suspend(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO,
+ pm_api_arg[2] ? pm_api_arg[2] :
+ ZYNQMP_PM_MAX_LATENCY, 0);
+ break;
+ case PM_SELF_SUSPEND:
+ ret = zynqmp_pm_self_suspend(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_MAX_LATENCY, 0);
+ break;
+ case PM_FORCE_POWERDOWN:
+ ret = eemi_ops->force_powerdown(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_ABORT_SUSPEND:
+ ret = zynqmp_pm_abort_suspend(pm_api_arg[0] ? pm_api_arg[0] :
+ ZYNQMP_PM_ABORT_REASON_UNKNOWN);
+ break;
+ case PM_REQUEST_WAKEUP:
+ ret = eemi_ops->request_wakeup(pm_api_arg[0],
+ pm_api_arg[1], pm_api_arg[2],
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_SET_WAKEUP_SOURCE:
+ ret = eemi_ops->set_wakeup_source(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_SYSTEM_SHUTDOWN:
+ ret = eemi_ops->system_shutdown(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_REQUEST_NODE:
+ ret = eemi_ops->request_node(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ pm_api_arg[2] ? pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_RELEASE_NODE:
+ ret = eemi_ops->release_node(pm_api_arg[0]);
+ break;
+ case PM_SET_REQUIREMENT:
+ ret = eemi_ops->set_requirement(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_CONTEXT,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_SET_MAX_LATENCY:
+ ret = eemi_ops->set_max_latency(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_MAX_LATENCY);
+ break;
+ case PM_SET_CONFIGURATION:
+ ret = eemi_ops->set_configuration(pm_api_arg[0]);
+ break;
+ case PM_GET_NODE_STATUS:
+ ret = eemi_ops->get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ case PM_GET_OPERATING_CHARACTERISTIC:
+ ret = eemi_ops->get_operating_characteristic(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_POWER,
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_OPERATING_CHARACTERISTIC:\n\tNodeId: %llu\n\tType: %llu\n\tResult: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_REGISTER_NOTIFIER:
+ ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+ pm_api_arg[1] ?
+ pm_api_arg[1] : 0,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ?
+ pm_api_arg[3] : 0);
+ break;
+ case PM_RESET_ASSERT:
+ ret = eemi_ops->reset_assert(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_RESET_GET_STATUS:
+ ret = eemi_ops->reset_get_status(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Reset status: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_GET_CHIPID:
+ ret = eemi_ops->get_chipid(&pm_api_ret[0], &pm_api_ret[1]);
+ if (!ret)
+ sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n",
+ pm_api_ret[0], pm_api_ret[1]);
+ break;
+ case PM_PINCTRL_GET_FUNCTION:
+ ret = eemi_ops->pinctrl_get_function(pm_api_arg[0],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Current set function for the pin: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_SET_FUNCTION:
+ ret = eemi_ops->pinctrl_set_function(pm_api_arg[0],
+ pm_api_arg[1]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_GET:
+ ret = eemi_ops->pinctrl_get_config(pm_api_arg[0], pm_api_arg[1],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Pin: %llu, Param: %llu, Value: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_SET:
+ ret = eemi_ops->pinctrl_set_config(pm_api_arg[0],
+ pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_IOCTL:
+ ret = eemi_ops->ioctl(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2], pm_api_arg[3],
+ &pm_api_ret[0]);
+ if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA ||
+ pm_api_arg[1] == IOCTL_READ_GGS ||
+ pm_api_arg[1] == IOCTL_READ_PGGS))
+ sprintf(debugfs_buf, "IOCTL return value: %u\n",
+ pm_api_ret[1]);
+ break;
+ case PM_CLOCK_ENABLE:
+ ret = eemi_ops->clock_enable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_DISABLE:
+ ret = eemi_ops->clock_disable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_GETSTATE:
+ ret = eemi_ops->clock_getstate(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock state: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETDIVIDER:
+ ret = eemi_ops->clock_setdivider(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETDIVIDER:
+ ret = eemi_ops->clock_getdivider(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Divider Value: %d\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETRATE:
+ ret = eemi_ops->clock_setrate(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETRATE:
+ ret = eemi_ops->clock_getrate(pm_api_arg[0], &rate);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock rate :%llu\n", rate);
+ break;
+ case PM_CLOCK_SETPARENT:
+ ret = eemi_ops->clock_setparent(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETPARENT:
+ ret = eemi_ops->clock_getparent(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Clock parent Index: %u\n", pm_api_ret[0]);
+ break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
diff --git a/drivers/firmware/xilinx/zynqmp-ggs.c b/drivers/firmware/xilinx/zynqmp-ggs.c
new file mode 100644
index 000000000000..42179ad73c7f
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ggs.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+static ssize_t read_register(char *buf, u32 ioctl_id, u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops->ioctl)
+ return -EFAULT;
+
+ ret = eemi_ops->ioctl(0, ioctl_id, reg, 0, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
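+/*
+ * Parse "<mask> <value>" (both hex) from @buf and read-modify-write the
+ * selected GGS/PGGS register through the firmware ioctl interface.
+ */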
+static ssize_t write_register(const char *buf, size_t count, u32 read_ioctl,
+ u32 write_ioctl, u32 reg)
+{
+ char *kern_buff, *inbuf, *tok;
+ long mask, value;
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops->ioctl)
+ return -EFAULT;
+
+ kern_buff = kzalloc(count, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+
+ ret = strscpy(kern_buff, buf, count);
+ if (ret < 0) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ inbuf = kern_buff;
+
+ /* Read the write mask */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = kstrtol(tok, 16, &mask);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Read the write value */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = kstrtol(tok, 16, &value);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = eemi_ops->ioctl(0, read_ioctl, reg, 0, ret_payload);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
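+
+ /* Read-modify-write: keep the old bits outside @mask, apply @value inside */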
+ ret_payload[1] &= ~mask;
+ value &= mask;
+ value |= ret_payload[1];
+
+ ret = eemi_ops->ioctl(0, write_ioctl, reg, value, NULL);
+ if (ret)
+ ret = -EFAULT;
+
+err:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/**
+ * ggs_show - Show global general storage (ggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Buffer for the register value string
+ * @reg: Register number
+ *
+ * Return: Number of bytes printed into the buffer.
+ *
+ * Helper function for viewing a ggs register value.
+ *
+ * User-space interface for viewing the content of a ggs register.
+ * Usage: cat /sys/firmware/zynqmp/ggs0
+ */
+static ssize_t ggs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ return read_register(buf, IOCTL_READ_GGS, reg);
+}
+
+/**
+ * ggs_store - Store global general storage (ggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered register value string
+ * @count: Size of buf
+ * @reg: Register number
+ *
+ * Return: count argument if request succeeds, the corresponding
+ * error code otherwise
+ *
+ * Helper function for storing a ggs register value.
+ *
+ * For example, the user-space interface for storing a value to the
+ * ggs0 register:
+ * echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/ggs0
+ */
+static ssize_t ggs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count,
+ u32 reg)
+{
+ if (!kobj || !attr || !buf || !count || reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ return write_register(buf, count, IOCTL_READ_GGS, IOCTL_WRITE_GGS, reg);
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(kobj, attr, buf, N); \
+ }
+
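+/*
+ * GGS0_SHOW(N) expands to a ggsN_show() helper that is paired with the
+ * __ATTR_RW(ggsN) attribute definitions below.
+ */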
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(kobj, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+/**
+ * pggs_show - Show persistent global general storage (pggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Buffer for the register value string
+ * @reg: Register number
+ *
+ * Return: Number of bytes printed into the buffer.
+ *
+ * Helper function for viewing a pggs register value.
+ */
+static ssize_t pggs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ return read_register(buf, IOCTL_READ_PGGS, reg);
+}
+
+/**
+ * pggs_store - Store persistent global general storage (pggs) sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered register value string
+ * @count: Size of buf
+ * @reg: Register number
+ *
+ * Return: count argument if request succeeds, the corresponding
+ * error code otherwise
+ *
+ * Helper function for storing a pggs register value.
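+ *
+ * For example (mirroring the ggs0 interface; the pggs0 path is assumed
+ * to be analogous):
+ * echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/pggs0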
+ */
+static ssize_t pggs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count,
+ u32 reg)
+{
+ if (!kobj || !attr || !buf || !count || reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ return write_register(buf, count, IOCTL_READ_PGGS,
+ IOCTL_WRITE_PGGS, reg);
+}
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(kobj, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(kobj, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static struct kobj_attribute zynqmp_attr_ggs0 = __ATTR_RW(ggs0);
+static struct kobj_attribute zynqmp_attr_ggs1 = __ATTR_RW(ggs1);
+static struct kobj_attribute zynqmp_attr_ggs2 = __ATTR_RW(ggs2);
+static struct kobj_attribute zynqmp_attr_ggs3 = __ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static struct kobj_attribute zynqmp_attr_pggs0 = __ATTR_RW(pggs0);
+static struct kobj_attribute zynqmp_attr_pggs1 = __ATTR_RW(pggs1);
+static struct kobj_attribute zynqmp_attr_pggs2 = __ATTR_RW(pggs2);
+static struct kobj_attribute zynqmp_attr_pggs3 = __ATTR_RW(pggs3);
+
+static struct attribute *attrs[] = {
+ &zynqmp_attr_ggs0.attr,
+ &zynqmp_attr_ggs1.attr,
+ &zynqmp_attr_ggs2.attr,
+ &zynqmp_attr_ggs3.attr,
+ &zynqmp_attr_pggs0.attr,
+ &zynqmp_attr_pggs1.attr,
+ &zynqmp_attr_pggs2.attr,
+ &zynqmp_attr_pggs3.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+int zynqmp_pm_ggs_init(struct kobject *parent_kobj)
+{
+ return sysfs_create_group(parent_kobj, &attr_group);
+}
diff --git a/drivers/firmware/xilinx/zynqmp-secure.c b/drivers/firmware/xilinx/zynqmp-secure.c
new file mode 100644
index 000000000000..1d105e04239f
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-secure.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SecureFw Driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#define ZYNQMP_AES_KEY_SIZE 64
+
+static u8 key[ZYNQMP_AES_KEY_SIZE] = {0};
+static dma_addr_t dma_addr;
+static u8 *keyptr;
+static size_t dma_size;
+static char *kbuf;
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
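+/*
+ * Intended sysfs flow, sketched under the assumption that the platform
+ * device registered below as "securefw" appears at
+ * /sys/devices/platform/securefw/:
+ *   echo <aes-key>   > key               (optional user key)
+ *   echo <image.bin> > secure_load       (firmware file to authenticate)
+ *   echo 1           > secure_load_done  (frees the DMA buffer)
+ */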
+static ssize_t secure_load_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ const struct firmware *fw;
+ char image_name[NAME_MAX];
+ u64 dst;
+ int len, ret;
+
+ if (!eemi_ops || !eemi_ops->secure_image)
+ return -EFAULT;
+
+ len = strscpy(image_name, buf, sizeof(image_name));
+ if (len < 0)
+ return -E2BIG;
+ if (len && image_name[len - 1] == '\n')
+ image_name[len - 1] = 0;
+
+ ret = request_firmware(&fw, image_name, dev);
+ if (ret) {
+ dev_err(dev, "Error requesting firmware %s\n", image_name);
+ return ret;
+ }
+ dma_size = fw->size;
+
+ if (keyptr)
+ dma_size = fw->size + ZYNQMP_AES_KEY_SIZE;
+
+ kbuf = dma_alloc_coherent(dev, dma_size,
+ &dma_addr, GFP_KERNEL);
+ if (!kbuf) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(kbuf, fw->data, fw->size);
+
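+ /* A user key written via the "key" sysfs attribute is appended after the image */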
+ if (keyptr)
+ memcpy(kbuf + fw->size, key, ZYNQMP_AES_KEY_SIZE);
+
+ /* To ensure cache coherency */
+ __flush_cache_user_range((unsigned long)kbuf,
+ (unsigned long)kbuf + dma_size);
+ release_firmware(fw);
+
+ if (keyptr)
+ ret = eemi_ops->secure_image(dma_addr, dma_addr + fw->size,
+ &dst);
+ else
+ ret = eemi_ops->secure_image(dma_addr, 0, &dst);
+
+ if (ret) {
+ dev_info(dev, "Failed to load secure image \r\n");
+ return ret;
+ }
+ dev_info(dev, "Verified image at 0x%llx\n", dst);
+
+ return count;
+}
+
+static ssize_t key_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, ZYNQMP_AES_KEY_SIZE + 1, "%s\n", key);
+}
+
+static ssize_t key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (count > ZYNQMP_AES_KEY_SIZE)
+ return -EINVAL;
+
+ memcpy(key, buf, count);
+ keyptr = &key[0];
+ return count;
+}
+
+static ssize_t secure_load_done_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+ if (value)
+ dma_free_coherent(dev, dma_size, kbuf, dma_addr);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(key);
+static DEVICE_ATTR_WO(secure_load);
+static DEVICE_ATTR_WO(secure_load_done);
+
+static struct attribute *securefw_attrs[] = {
+ &dev_attr_secure_load_done.attr,
+ &dev_attr_secure_load.attr,
+ &dev_attr_key.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(securefw);
+
+static int securefw_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct platform_device *securefw_pdev;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ securefw_pdev = pdev;
+
+ securefw_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ ret = of_dma_configure(&securefw_pdev->dev, NULL, true);
+ if (ret < 0) {
+ dev_info(&securefw_pdev->dev, "Cannot setup DMA ops\r\n");
+ return ret;
+ }
+
+ ret = sysfs_create_groups(&securefw_pdev->dev.kobj, securefw_groups);
+ if (ret)
+ return ret;
+
+ dev_info(&securefw_pdev->dev, "securefw probed\r\n");
+ return ret;
+}
+
+static int securefw_remove(struct platform_device *pdev)
+{
+ sysfs_remove_groups(&pdev->dev.kobj, securefw_groups);
+ return 0;
+}
+
+static struct platform_driver securefw_driver = {
+ .driver = {
+ .name = "securefw",
+ },
+ .probe = securefw_probe,
+ .remove = securefw_remove,
+};
+
+static struct platform_device *securefw_dev_reg;
+
+static int __init zynqmp_secure_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&securefw_driver);
+ if (ret)
+ return ret;
+
+ securefw_dev_reg = platform_device_register_simple("securefw", -1,
+ NULL, 0);
+ if (IS_ERR(securefw_dev_reg)) {
+ ret = PTR_ERR(securefw_dev_reg);
+ platform_driver_unregister(&securefw_driver);
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit zynqmp_secure_exit(void)
+{
+ platform_device_unregister(securefw_dev_reg);
+ platform_driver_unregister(&securefw_driver);
+}
+
+module_init(zynqmp_secure_init);
+module_exit(zynqmp_secure_exit);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index fd3d83745208..5d085ff3220a 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -24,8 +24,13 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static unsigned long register_address;
+
static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+static bool feature_check_enabled;
+static u32 zynqmp_pm_features[PM_API_MAX];
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -44,10 +49,14 @@ static int zynqmp_pm_ret_code(u32 ret_status)
case XST_PM_SUCCESS:
case XST_PM_DOUBLE_REQ:
return 0;
+ case XST_PM_NO_FEATURE:
+ return -ENOTSUPP;
case XST_PM_NO_ACCESS:
return -EACCES;
case XST_PM_ABORT_SUSPEND:
return -ECANCELED;
+ case XST_PM_MULT_USER:
+ return -EUSERS;
case XST_PM_INTERNAL:
case XST_PM_CONFLICT:
case XST_PM_INVALID_NODE:
@@ -127,6 +136,39 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
}
/**
+ * zynqmp_pm_feature() - Check whether the given feature is supported
+ * @api_id: API ID to check
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_feature(u32 api_id)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u64 smc_arg[2];
+
+ if (!feature_check_enabled)
+ return 0;
+
+ /* Return value if feature is already checked */
+ if (zynqmp_pm_features[api_id] != PM_FEATURE_UNCHECKED)
+ return zynqmp_pm_features[api_id];
+
+ smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
+ smc_arg[1] = api_id;
+
+ ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
+ if (ret) {
+ zynqmp_pm_features[api_id] = PM_FEATURE_INVALID;
+ return PM_FEATURE_INVALID;
+ }
+
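+	/* Cache the reported capability so the SMC is issued only once per API */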
+ zynqmp_pm_features[api_id] = ret_payload[1];
+
+ return zynqmp_pm_features[api_id];
+}
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -160,6 +202,9 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
*/
u64 smc_arg[4];
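+	/* Skip the SMC when the firmware has reported this API as unsupported */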
+ if (zynqmp_pm_feature(pm_api_id) == PM_FEATURE_INVALID)
+ return -ENOTSUPP;
+
smc_arg[0] = PM_SIP_SVC | pm_api_id;
smc_arg[1] = ((u64)arg1 << 32) | arg0;
smc_arg[2] = ((u64)arg3 << 32) | arg2;
@@ -197,11 +242,11 @@ static int zynqmp_pm_get_api_version(u32 *version)
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
- * @idcode: IDCODE register
- * @version: version register
+ * @idcode: IDCODE register
+ * @version: version register
*
- * Return: Returns the status of the operation and the idcode and version
- * registers in @idcode and @version.
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
*/
static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
@@ -473,6 +518,21 @@ static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
case IOCTL_GET_PLL_FRAC_MODE:
case IOCTL_SET_PLL_FRAC_DATA:
case IOCTL_GET_PLL_FRAC_DATA:
+ case IOCTL_GET_RPU_OPER_MODE:
+ case IOCTL_SET_RPU_OPER_MODE:
+ case IOCTL_RPU_BOOT_ADDR_CONFIG:
+ case IOCTL_TCM_COMB_CONFIG:
+ case IOCTL_SET_TAPDELAY_BYPASS:
+ case IOCTL_SET_SGMII_MODE:
+ case IOCTL_SD_DLL_RESET:
+ case IOCTL_SET_SD_TAPDELAY:
+ case IOCTL_WRITE_GGS:
+ case IOCTL_READ_GGS:
+ case IOCTL_WRITE_PGGS:
+ case IOCTL_READ_PGGS:
+ case IOCTL_ULPI_RESET:
+ case IOCTL_SET_BOOT_HEALTH_STATUS:
+ case IOCTL_AFI:
return 1;
default:
return 0;
@@ -540,6 +600,22 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
}
/**
+ * zynqmp_pm_load_pdi - Load and process a PDI
+ * @src: Source device where the PDI is located
+ * @address: PDI source address
+ *
+ * This function provides support for loading a PDI from Linux.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src,
+ lower_32_bits(address),
+ upper_32_bits(address), 0, NULL);
+}
+
+/**
* zynqmp_pm_fpga_load - Perform the fpga load
* @address: Address to write to
* @size: pl bitstream size
@@ -664,6 +740,449 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
qos, ack, NULL);
}
+/**
+ * zynqmp_pm_fpga_read - Perform the fpga configuration readback
+ * @reg_numframes: Configuration register offset or number of frames to read
+ * @phys_address: Physical Address of the buffer
+ * @readback_type: Type of fpga readback operation
+ * @value: Value to read
+ *
+ * This function provides access to xilfpga library to perform
+ * fpga configuration readback.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_read(const u32 reg_numframes, const u64 phys_address,
+ u32 readback_type, u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, reg_numframes,
+ lower_32_bits(phys_address),
+ upper_32_bits(phys_address), readback_type,
+ ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address: Address of the data/ Address of output buffer where
+ * hash should be stored.
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - for initializing csudma driver and SHA3(Here address
+ * and size inputs can be NULL).
+ * BIT(1) - to call Sha3_Update API which can be called multiple
+ * times when data is not contiguous.
+ * BIT(2) - to get final hash of the whole updated data.
+ * Hash will be overwritten at provided address with
+ * 48 bytes.
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_rsa - Access RSA hardware to encrypt/decrypt the data with RSA.
+ * @address: Address of the data
+ * @size: Size of the data.
+ * @flags:
+ * BIT(0) - Encryption/Decryption
+ * 0 - RSA decryption with private key
+ * 1 - RSA encryption with public key.
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_rsa(const u64 address, const u32 size, const u32 flags)
+{
+ u32 lower_addr = lower_32_bits(address);
+ u32 upper_addr = upper_32_bits(address);
+
+ return zynqmp_pm_invoke_fn(PM_SECURE_RSA, upper_addr, lower_addr,
+ size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
+ * AES-GCM core.
+ * @address: Address of the AesParams structure.
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
+ lower_32_bits(address),
+ 0, 0, ret_payload);
+ *out = ret_payload[1];
+ return ret;
+}
+
+/**
+ * zynqmp_pm_request_suspend - PM call to request that another PU or
+ *			       subsystem be suspended gracefully
+ * @node: Node ID of the targeted PU or subsystem
+ * @ack: Flag to specify whether acknowledge is requested
+ * @latency: Requested wakeup latency (not supported)
+ * @state: Requested state (not supported)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_suspend(const u32 node,
+ const enum zynqmp_pm_request_ack ack,
+ const u32 latency,
+ const u32 state)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_SUSPEND, node, ack,
+ latency, state, NULL);
+}
+
+/**
+ * zynqmp_pm_force_powerdown - PM call to request that another PU or
+ *			       subsystem be powered down forcefully
+ * @target: Node ID of the targeted PU or subsystem
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_force_powerdown(const u32 target,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, target, ack, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_wakeup - PM call to wake up selected master or subsystem
+ * @node: Node ID of the master or subsystem
+ * @set_addr: Specifies whether the address argument is relevant
+ * @address: Address from which to resume when woken up
+ * @ack: Flag to specify whether acknowledge requested
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_wakeup(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ /* set_addr flag is encoded into 1st bit of address */
+ return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
+ address >> 32, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_set_wakeup_source - PM call to specify the wakeup source
+ * while suspended
+ * @target: Node ID of the targeted PU or subsystem
+ * @wakeup_node: Node ID of the wakeup peripheral
+ * @enable: Enable or disable the specified peripheral as wake source
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_wakeup_source(const u32 target,
+ const u32 wakeup_node,
+ const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_WAKEUP_SOURCE, target,
+ wakeup_node, enable, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type:	Shutdown or restart request: 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_max_latency - PM call to set wakeup latency requirements
+ * @node: Node ID of the slave
+ * @latency: Requested maximum wakeup latency
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_max_latency(const u32 node, const u32 latency)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_MAX_LATENCY, node, latency,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_configuration - PM call to set system configuration
+ * @physical_addr: Physical 32-bit address of data structure in memory
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_configuration(const u32 physical_addr)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_CONFIGURATION, physical_addr, 0,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node: ID of the component or sub-system in question
+ * @status: Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ * used for slave nodes only.
+ * @usage: Usage information, used for slave nodes only:
+ * PM_USAGE_NO_MASTER - No master is currently using
+ * the node
+ * PM_USAGE_CURRENT_MASTER - Only requesting master is
+ * currently using the node
+ * PM_USAGE_OTHER_MASTER - Only other masters are
+ * currently using the node
+ * PM_USAGE_BOTH_MASTERS - Both the current and at least
+ * one other master is currently
+ * using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, node, 0, 0,
+ 0, ret_payload);
+ if (ret_payload[0] == XST_PM_SUCCESS) {
+ *status = ret_payload[1];
+ if (requirements)
+ *requirements = ret_payload[2];
+ if (usage)
+ *usage = ret_payload[3];
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_get_operating_characteristic - PM call to request operating
+ * characteristic information
+ * @node: Node ID of the slave
+ * @type: Type of the operating characteristic requested
+ * @result:	Used to return the requested operating characteristic
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_operating_characteristic(const u32 node,
+ const enum zynqmp_pm_opchar_type type,
+ u32 *const result)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!result)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_OPERATING_CHARACTERISTIC,
+ node, type, 0, 0, ret_payload);
+ if (ret_payload[0] == XST_PM_SUCCESS)
+ *result = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_request - Request Pin from firmware
+ * @pin: Pin number to request
+ *
+ * This function requests the pin from the firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_release - Inform firmware that Pin control is released
+ * @pin: Pin number to release
+ *
+ * This function releases the pin back to the firmware.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
+ * @pin: Pin number
+ * @id: Buffer to store function ID
+ *
+ * This function provides the function currently set for the given pin.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!id)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
+ 0, 0, ret_payload);
+ *id = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_set_function - Set requested function for the pin
+ * @pin: Pin number
+ * @id: Function ID to set
+ *
+ * This function sets requested function for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to get
+ * @value: Buffer to store parameter value
+ *
+ * This function gets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+ u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
+ 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
+ * @pin: Pin number
+ * @param: Parameter to set
+ * @value: Parameter value to set
+ *
+ * This function sets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
+ param, value, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_config_reg_access - PM Config API for Config register access
+ * @register_access_id: ID of the requested REGISTER_ACCESS
+ * @address: Address of the register to be accessed
+ * @mask: Mask to be written to the register
+ * @value: Value to be written to the register
+ * @out: Returned output value
+ *
+ * This function calls REGISTER_ACCESS to configure CSU/PMU registers.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_config_reg_access(u32 register_access_id, u32 address,
+ u32 mask, u32 value, u32 *out)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_ACCESS, register_access_id,
+ address, mask, value, out);
+}
+
+/**
+ * zynqmp_pm_efuse_access - Provides access to efuse memory.
+ * @address: Address of the efuse params structure
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_EFUSE_ACCESS, upper_32_bits(address),
+ lower_32_bits(address), 0, 0, ret_payload);
+ *out = ret_payload[1];
+
+ return ret;
+}
+
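+/**
+ * zynqmp_pm_secure_load - Authenticate and load a secure image
+ * @src_addr:	Physical address of the image
+ * @key_addr:	Physical address of the user key, or 0 when no key is passed
+ * @dst:	Returned address of the authenticated/decrypted image
+ *
+ * Return: Returns status, either success or error+reason
+ */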
+static int zynqmp_pm_secure_load(const u64 src_addr, u64 key_addr, u64 *dst)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret_value;
+
+ if (!dst)
+ return -EINVAL;
+
+ ret_value = zynqmp_pm_invoke_fn(PM_SECURE_IMAGE,
+ lower_32_bits(src_addr),
+ upper_32_bits(src_addr),
+ lower_32_bits(key_addr),
+ upper_32_bits(key_addr),
+ ret_payload);
+ *dst = ((u64)ret_payload[1] << 32) | ret_payload[2];
+
+ return ret_value;
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
.get_chipid = zynqmp_pm_get_chipid,
@@ -687,6 +1206,29 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.set_requirement = zynqmp_pm_set_requirement,
.fpga_load = zynqmp_pm_fpga_load,
.fpga_get_status = zynqmp_pm_fpga_get_status,
+ .fpga_read = zynqmp_pm_fpga_read,
+ .sha_hash = zynqmp_pm_sha_hash,
+ .rsa = zynqmp_pm_rsa,
+ .request_suspend = zynqmp_pm_request_suspend,
+ .force_powerdown = zynqmp_pm_force_powerdown,
+ .request_wakeup = zynqmp_pm_request_wakeup,
+ .set_wakeup_source = zynqmp_pm_set_wakeup_source,
+ .system_shutdown = zynqmp_pm_system_shutdown,
+ .set_max_latency = zynqmp_pm_set_max_latency,
+ .set_configuration = zynqmp_pm_set_configuration,
+ .get_node_status = zynqmp_pm_get_node_status,
+ .get_operating_characteristic = zynqmp_pm_get_operating_characteristic,
+ .pinctrl_request = zynqmp_pm_pinctrl_request,
+ .pinctrl_release = zynqmp_pm_pinctrl_release,
+ .pinctrl_get_function = zynqmp_pm_pinctrl_get_function,
+ .pinctrl_set_function = zynqmp_pm_pinctrl_set_function,
+ .pinctrl_get_config = zynqmp_pm_pinctrl_get_config,
+ .pinctrl_set_config = zynqmp_pm_pinctrl_set_config,
+ .register_access = zynqmp_pm_config_reg_access,
+ .aes = zynqmp_pm_aes_engine,
+ .efuse_access = zynqmp_pm_efuse_access,
+ .secure_image = zynqmp_pm_secure_load,
+ .pdi_load = zynqmp_pm_load_pdi,
};
/**
@@ -704,6 +1246,350 @@ const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
+};
+
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
+/**
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
+ *
+ * Return: Pointer to the matching shutdown scope struct from the array
+ *         of available options if the string is valid, otherwise NULL.
+ */
+static struct zynqmp_pm_shutdown_scope *
+zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+/**
+ * shutdown_scope_show - Show shutdown_scope sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Requested available shutdown_scope attributes string
+ *
+ * User-space interface for viewing the available scope options for system
+ * shutdown. Scope option for next shutdown call is marked with [].
+ *
+ * Usage: cat /sys/firmware/zynqmp/shutdown_scope
+ *
+ * Return: Number of bytes printed into the buffer.
+ */
+static ssize_t shutdown_scope_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+/**
+ * shutdown_scope_store - Store shutdown_scope sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered shutdown_scope attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the scope for the next system shutdown.
+ * Usage: echo <scope> > /sys/firmware/zynqmp/shutdown_scope
+ *
+ * The Linux shutdown functionality implemented via PSCI system_off does not
+ * include an option to set a scope, i.e. which parts of the system to shut
+ * down.
+ *
+ * This API function allows setting the shutdown scope for the next
+ * shutdown request by passing it to the ATF running in EL3. When the next
+ * shutdown is performed, the platform-specific portion of PSCI system_off
+ * can use the chosen shutdown scope.
+ *
+ * subsystem: Only the APU along with all of its peripherals not used by other
+ * processing units will be shut down. This may result in the FPD
+ * power domain being shut down provided that no other processing
+ * unit uses FPD peripherals or DRAM.
+ * ps_only: The complete PS will be shut down, including the RPU, PMU, etc.
+ * Only the PL domain (FPGA) remains untouched.
+ * system: The complete system/device is shut down.
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t shutdown_scope_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static struct kobj_attribute zynqmp_attr_shutdown_scope =
+ __ATTR_RW(shutdown_scope);
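+
+/*
+ * Illustrative interaction from user space (output format follows
+ * shutdown_scope_show(); "system" is the default selection):
+ *   $ cat /sys/firmware/zynqmp/shutdown_scope
+ *   subsystem ps_only [system]
+ *   $ echo ps_only > /sys/firmware/zynqmp/shutdown_scope
+ */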
+
+/**
+ * health_status_store - Store health_status sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered health_status attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the boot health status.
+ * Usage: echo <value> > /sys/firmware/zynqmp/health_status
+ *
+ * Value:
+ * 1 - Set healthy bit to 1
+ * 0 - Unset healthy bit
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t health_status_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_ioctl(0, IOCTL_SET_BOOT_HEALTH_STATUS, value, 0, NULL);
+ if (ret) {
+ pr_err("unable to set healthy bit value to %u\n", value);
+ return ret;
+ }
+
+ return count;
+}
+
+static struct kobj_attribute zynqmp_attr_health_status =
+ __ATTR_WO(health_status);
+
+/**
+ * config_reg_store - Write config_reg sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered config_reg attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the config register.
+ *
+ * To write any CSU/PMU register:
+ * echo <address> <mask> <value> > /sys/firmware/zynqmp/config_reg
+ * Usage:
+ * echo 0x345AB234 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/config_reg
+ *
+ * To read any CSU/PMU register, write the address first:
+ * echo <address> > /sys/firmware/zynqmp/config_reg
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t config_reg_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *kern_buff, *inbuf, *tok;
+ unsigned long address, value, mask;
+ int ret;
+
+	kern_buff = kzalloc(count + 1, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+
+	strlcpy(kern_buff, buf, count + 1);
+
+ inbuf = kern_buff;
+
+	/* Read the address */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+	ret = kstrtoul(tok, 16, &address);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+	/* Read the mask */
+	tok = strsep(&inbuf, " ");
+	/*
+	 * If only an address is provided, this is a read operation.
+	 * Store the address in a global variable and retrieve it
+	 * whenever required.
+	 */
+	register_address = address;
+	if (!tok)
+		goto err;
+
+	ret = kstrtoul(tok, 16, &mask);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+	/* Read the value */
+	tok = strsep(&inbuf, " ");
+	if (!tok) {
+		ret = -EFAULT;
+		goto err;
+	}
+	ret = kstrtoul(tok, 16, &value);
+	if (ret) {
+		ret = -EFAULT;
+		goto err;
+	}
+	ret = zynqmp_pm_config_reg_access(CONFIG_REG_WRITE, address,
+					  mask, value, NULL);
+	if (ret)
+		pr_err("unable to write value 0x%lx to address 0x%lx\n",
+		       value, address);
+err:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+ return count;
+}
+
+/**
+ * config_reg_show - Read config_reg sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Buffer where the register value string is written
+ *
+ * User-space interface for getting the config register.
+ *
+ * To read any CSU/PMU register, write the address first:
+ * echo <address> > /sys/firmware/zynqmp/config_reg
+ *
+ * Then read the value with:
+ * cat /sys/firmware/zynqmp/config_reg
+ *
+ * Return: number of chars written to buf.
+ */
+static ssize_t config_reg_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_config_reg_access(CONFIG_REG_READ, register_address,
+ 0, 0, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static struct kobj_attribute zynqmp_attr_config_reg =
+ __ATTR_RW(config_reg);
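+
+/*
+ * Illustrative read sequence (the address and value are examples only):
+ *   $ echo 0x345AB234 > /sys/firmware/zynqmp/config_reg
+ *   $ cat /sys/firmware/zynqmp/config_reg
+ *   0x1234abcd
+ */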
+
+static struct attribute *attrs[] = {
+ &zynqmp_attr_shutdown_scope.attr,
+ &zynqmp_attr_health_status.attr,
+ &zynqmp_attr_config_reg.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static int zynqmp_pm_sysfs_init(void)
+{
+ struct kobject *zynqmp_kobj;
+ int ret;
+
+ zynqmp_kobj = kobject_create_and_add("zynqmp", firmware_kobj);
+ if (!zynqmp_kobj) {
+ pr_err("zynqmp: Firmware kobj add failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(zynqmp_kobj, &attr_group);
+ if (ret) {
+ pr_err("%s() sysfs creation fail with error %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = zynqmp_pm_ggs_init(zynqmp_kobj);
+ if (ret) {
+ pr_err("%s() GGS init fail with error %d\n",
+ __func__, ret);
+ goto err;
+ }
+err:
+	if (ret)
+		kobject_put(zynqmp_kobj);
+	return ret;
+}
+
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -711,8 +1597,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
int ret;
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
- if (!np)
- return 0;
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (!np)
+ return 0;
+
+ feature_check_enabled = true;
+ }
of_node_put(np);
ret = get_set_conduit_method(dev->of_node);
@@ -748,6 +1639,12 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
/* Assign eemi_ops_table */
eemi_ops_tbl = &eemi_ops;
+ ret = zynqmp_pm_sysfs_init();
+ if (ret) {
+ pr_err("%s() sysfs init fail with error %d\n", __func__, ret);
+ return ret;
+ }
+
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
@@ -770,6 +1667,7 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
static const struct of_device_id zynqmp_firmware_of_match[] = {
{.compatible = "xlnx,zynqmp-firmware"},
+ {.compatible = "xlnx,versal-firmware-wip"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index dd414250e77e..e3e949319099 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -91,6 +91,24 @@ config FPGA_MGR_TS73XX
FPGA manager driver support for the Altera Cyclone II FPGA
present on the TS-73xx SBC boards.
+config FPGA_MGR_ZYNQ_AFI_FPGA
+ bool "Xilinx AFI FPGA"
+ depends on FPGA_MGR_ZYNQ_FPGA
+ help
+ Zynq AFI driver support for writing to the AFI registers
+ for configuring the PS_PL interface. For some of the bitstream
+ or designs to work the PS to PL interfaces need to be configured
+ like the data bus-width etc.
+
+config XILINX_AFI_FPGA
+ bool "Xilinx AFI FPGA"
+ depends on FPGA_MGR_ZYNQMP_FPGA || COMPILE_TEST
+ help
+ FPGA manager driver support for writing to the AFI registers
+ for configuring the PS_PL interface. For some of the bitstream
+ or designs to work the PS to PL interfaces need to be configured
+ like the datawidth etc.
+
config FPGA_BRIDGE
tristate "FPGA Bridge Framework"
help
@@ -138,6 +156,13 @@ config OF_FPGA_REGION
Support for loading FPGA images by applying a Device Tree
overlay.
+config FPGA_MGR_DEBUG_FS
+ tristate "FPGA debug fs"
+ select DEBUG_FS
+ help
+	  FPGA manager debugfs support, which provides an interface for
+	  reading back FPGA configuration information.
+
config FPGA_DFL
tristate "FPGA Device Feature List (DFL) support"
select FPGA_BRIDGE
@@ -215,4 +240,13 @@ config FPGA_MGR_ZYNQMP_FPGA
to configure the programmable logic(PL) through PS
on ZynqMP SoC.
+config FPGA_MGR_VERSAL_FPGA
+ tristate "Xilinx Versal FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+	  Select this option to enable FPGA manager driver support for
+	  the Xilinx Versal SoC. The driver uses the Versal firmware
+	  interface to load programmable logic (PL) images onto the SoC.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 312b9371742f..b94ce4c4d91f 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -17,7 +17,10 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQ_AFI_FPGA) += zynq-afi.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_XILINX_AFI_FPGA) += xilinx-afi.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 80bd8f1b2aa6..f70328543a1a 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -13,6 +13,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+/* For enabling the manual bridge set (enable/disable) function */
+#ifdef CONFIG_DEBUG_KERNEL
+#undef DEBUG
+#define DEBUG
+#endif
+
static DEFINE_IDA(fpga_bridge_ida);
static struct class *fpga_bridge_class;
@@ -310,9 +316,33 @@ static ssize_t state_show(struct device *dev,
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+#ifdef DEBUG
+static ssize_t set_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ long enable;
+ int ret;
+
+ ret = kstrtol(buf, 16, &enable);
+ if (ret)
+ return ret;
+
+ if (bridge->br_ops && bridge->br_ops->enable_set)
+ enable = bridge->br_ops->enable_set(bridge, !!enable);
+
+ return count;
+}
+static DEVICE_ATTR_WO(set);
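+
+/*
+ * Illustrative use from user space (br0 is an example bridge device
+ * name; the value is parsed as hex and reduced to a boolean):
+ *   echo 0 > /sys/class/fpga_bridge/br0/set    # disable the bridge
+ *   echo 1 > /sys/class/fpga_bridge/br0/set    # enable the bridge
+ */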
+#endif
+
static struct attribute *fpga_bridge_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+#ifdef DEBUG
+ &dev_attr_set.attr,
+#endif
NULL,
};
ATTRIBUTE_GROUPS(fpga_bridge);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index c3866816456a..83c02c0b4a20 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -8,6 +8,7 @@
* With code from the mailing list:
* Copyright (C) 2013 Xilinx, Inc.
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/idr.h>
@@ -328,6 +329,11 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ;
+ /* flags indicates whether to do full or partial reconfiguration */
+ info->flags = mgr->flags;
+ memcpy(info->key, mgr->key, ENCRYPTED_KEY_LEN);
+	memcpy(info->iv, mgr->iv, ENCRYPTED_IV_LEN);
+
ret = request_firmware(&fw, image_name, dev);
if (ret) {
mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ_ERR;
@@ -406,6 +412,91 @@ static ssize_t state_show(struct device *dev,
return sprintf(buf, "%s\n", state_str[mgr->state]);
}
+static ssize_t firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ unsigned int len;
+ char image_name[NAME_MAX];
+ int ret;
+
+ /* struct with information about the FPGA image to program. */
+ struct fpga_image_info info = {0};
+
+	if (count >= NAME_MAX)
+		return -EINVAL;
+
+	/* lose terminating \n */
+	strcpy(image_name, buf);
+	len = strlen(image_name);
+	if (len && image_name[len - 1] == '\n')
+		image_name[len - 1] = 0;
+
+ ret = fpga_mgr_firmware_load(mgr, &info, image_name);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+	return snprintf(buf, ENCRYPTED_KEY_LEN + 2, "%s\n", mgr->key);
+}
+
+static ssize_t key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+	if (count > ENCRYPTED_KEY_LEN)
+		return -EINVAL;
+
+	memcpy(mgr->key, buf, count);
+
+ return count;
+}
+
+static ssize_t iv_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+	return snprintf(buf, ENCRYPTED_IV_LEN + 2, "%s\n", mgr->iv);
+}
+
+static ssize_t iv_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+	if (count > ENCRYPTED_IV_LEN)
+		return -EINVAL;
+
+	memcpy(mgr->iv, buf, count);
+
+ return count;
+}
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return sprintf(buf, "%lx\n", mgr->flags);
+}
+
+static ssize_t flags_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ int ret;
+
+	ret = kstrtoul(buf, 16, &mgr->flags);
+ if (ret)
+ return ret;
+
+ return count;
+}
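+
+/*
+ * Illustrative use of the attributes above from user space (fpga0 and
+ * the values are examples only; how the flag bits and key string are
+ * interpreted is up to the low-level driver):
+ *   echo 1 > /sys/class/fpga_manager/fpga0/flags
+ *   echo <key-string> > /sys/class/fpga_manager/fpga0/key
+ *   echo design.bit > /sys/class/fpga_manager/fpga0/firmware
+ */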
+
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -434,11 +525,19 @@ static ssize_t status_show(struct device *dev,
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+static DEVICE_ATTR_WO(firmware);
+static DEVICE_ATTR_RW(flags);
+static DEVICE_ATTR_RW(key);
+static DEVICE_ATTR_RW(iv);
static DEVICE_ATTR_RO(status);
static struct attribute *fpga_mgr_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_key.attr,
+ &dev_attr_iv.attr,
&dev_attr_status.attr,
NULL,
};
@@ -518,6 +617,116 @@ void fpga_mgr_put(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_put);
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+#include <linux/debugfs.h>
+
+static int fpga_mgr_read(struct seq_file *s, void *data)
+{
+ struct fpga_manager *mgr = (struct fpga_manager *)s->private;
+ int ret = 0;
+
+ if (!mgr->mops->read)
+ return -ENOENT;
+
+ if (!mutex_trylock(&mgr->ref_mutex))
+ return -EBUSY;
+
+ if (mgr->state != FPGA_MGR_STATE_OPERATING) {
+ ret = -EPERM;
+ goto err_unlock;
+ }
+
+ /* Read the FPGA configuration data from the fabric */
+ ret = mgr->mops->read(mgr, s);
+ if (ret)
+ dev_err(&mgr->dev, "Error while reading configuration data from FPGA\n");
+
+err_unlock:
+ mutex_unlock(&mgr->ref_mutex);
+
+ return ret;
+}
+
+static int fpga_mgr_read_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fpga_mgr_read, inode->i_private);
+}
+
+static const struct file_operations fpga_mgr_ops_image = {
+	.owner = THIS_MODULE,
+	.open = fpga_mgr_read_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * fpga_mgr_debugfs_buf_load() - debugfs write function
+ * @file: User file
+ * @ptr: User-space pointer to the FPGA image buffer
+ * @len: Length of the image
+ * @off: Offset within the file
+ *
+ * Return: Number of bytes if request succeeds,
+ * the corresponding error code otherwise
+ */
+static ssize_t fpga_mgr_debugfs_buf_load(struct file *file,
+ const char __user *ptr, size_t len,
+ loff_t *off)
+{
+ struct fpga_manager *mgr = file->private_data;
+ struct device *dev = &mgr->dev;
+ char *buf;
+ int ret = 0;
+
+ /* struct with information about the FPGA image to program. */
+ struct fpga_image_info info = {0};
+
+ /* flags indicates whether to do full or partial reconfiguration */
+ info.flags = mgr->flags;
+
+ ret = fpga_mgr_lock(mgr);
+ if (ret) {
+ dev_err(dev, "FPGA manager is busy\n");
+ return -EBUSY;
+ }
+
+ buf = vmalloc(len);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto mgr_unlock;
+ }
+
+ if (copy_from_user(buf, ptr, len)) {
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ info.buf = buf;
+ info.count = len;
+
+ ret = fpga_mgr_load(mgr, &info);
+ if (ret) {
+ dev_err(dev, "fpga_mgr_load returned with value %d\n\r", ret);
+ goto free_buf;
+ }
+
+free_buf:
+ vfree(buf);
+mgr_unlock:
+ fpga_mgr_unlock(mgr);
+
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static const struct file_operations fpga_mgr_ops_load = {
+ .open = simple_open,
+ .write = fpga_mgr_debugfs_buf_load,
+ .llseek = default_llseek,
+};
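+
+/*
+ * Illustrative debugfs usage (fpga0 is an example manager name; the
+ * paths follow the directories created in fpga_mgr_register() below):
+ *   cat design.bin > /sys/kernel/debug/fpga/fpga0/load
+ *   cat /sys/kernel/debug/fpga/fpga0/image
+ */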
+#endif
+
/**
* fpga_mgr_lock - Lock FPGA manager for exclusive use
* @mgr: fpga manager
@@ -686,6 +895,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
int fpga_mgr_register(struct fpga_manager *mgr)
{
int ret;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ struct dentry *d, *parent;
+#endif
/*
* Initialize framework state by requesting low level driver read state
@@ -698,6 +910,33 @@ int fpga_mgr_register(struct fpga_manager *mgr)
if (ret)
goto error_device;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+	mgr->dir = debugfs_create_dir("fpga", NULL);
+	if (!mgr->dir) {
+		ret = -ENOMEM;
+		goto error_device;
+	}
+
+	parent = mgr->dir;
+	d = debugfs_create_dir(mgr->dev.kobj.name, parent);
+	if (!d) {
+		debugfs_remove_recursive(parent);
+		ret = -ENOMEM;
+		goto error_device;
+	}
+
+	parent = d;
+	d = debugfs_create_file("image", 0644, parent, mgr,
+				&fpga_mgr_ops_image);
+	if (!d) {
+		debugfs_remove_recursive(mgr->dir);
+		ret = -ENOMEM;
+		goto error_device;
+	}
+
+	d = debugfs_create_file("load", 0644, parent, mgr,
+				&fpga_mgr_ops_load);
+	if (!d) {
+		debugfs_remove_recursive(mgr->dir);
+		ret = -ENOMEM;
+		goto error_device;
+	}
+#endif
dev_info(&mgr->dev, "%s registered\n", mgr->name);
return 0;
@@ -719,6 +958,10 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
{
dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name);
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ debugfs_remove_recursive(mgr->dir);
+#endif
+
/*
* If the low level driver provides a method for putting fpga into
* a desired state upon unregister, do it.
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index bde5a9d460c5..9c8593e5a4fe 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/reset.h>
static DEFINE_IDA(fpga_region_ida);
static struct class *fpga_region_class;
@@ -98,6 +99,7 @@ int fpga_region_program_fpga(struct fpga_region *region)
struct device *dev = &region->dev;
struct fpga_image_info *info = region->info;
int ret;
+ struct reset_control *rstc;
region = fpga_region_get(region);
if (IS_ERR(region)) {
@@ -141,7 +143,15 @@ int fpga_region_program_fpga(struct fpga_region *region)
goto err_put_br;
}
+ rstc = of_reset_control_array_get(info->overlay, false, true, true);
+ if (IS_ERR(rstc))
+ goto err_put_br;
+
+ reset_control_reset(rstc);
+ reset_control_put(rstc);
+
fpga_mgr_unlock(region->mgr);
+
fpga_region_put(region);
return 0;
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
new file mode 100644
index 000000000000..d8ef2d17c32b
--- /dev/null
+++ b/drivers/fpga/versal-fpga.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/**
+ * struct versal_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @source: Source of the PDI image (DDR, OCM, etc.)
+ * @flags: Flags used to identify the PL image type
+ * @source_attr: source sysfs attribute
+ */
+struct versal_fpga_priv {
+ struct device *dev;
+ u32 source;
+ u32 flags;
+ struct device_attribute *source_attr;
+};
+
+static int versal_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct versal_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+static int versal_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct versal_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ char *kbuf;
+ int ret;
+
+ if (!eemi_ops || !eemi_ops->pdi_load)
+ return -ENXIO;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+	wmb(); /* ensure all writes are done before initiating the FW call */
+
+ ret = eemi_ops->pdi_load(priv->source, dma_addr);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int versal_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ return 0;
+}
+
+static enum fpga_mgr_states versal_fpga_ops_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_OPERATING;
+}
+
+static const struct fpga_manager_ops versal_fpga_ops = {
+ .state = versal_fpga_ops_state,
+ .write_init = versal_fpga_ops_write_init,
+ .write = versal_fpga_ops_write,
+ .write_complete = versal_fpga_ops_write_complete,
+};
+
+static ssize_t source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ struct versal_fpga_priv *priv = mgr->priv;
+ int ret;
+
+ ret = kstrtou32(buf, 16, &priv->source);
+ if (ret)
+ return ret;
+
+ return count;
+}
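+
+/*
+ * Illustrative use (fpga0 is an example device name; the value is the
+ * PDI source ID, in hex, that the firmware expects):
+ *   echo 0x0 > /sys/class/fpga_manager/fpga0/source
+ */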
+
+static struct device_attribute *
+versal_fpga_create_sysfs_entry(struct device *dev, char *name, int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+	attrs = devm_kzalloc(dev, sizeof(struct device_attribute), GFP_KERNEL);
+ if (!attrs)
+ return NULL;
+
+ name_copy = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return NULL;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->store = source_store;
+ sysfs_attr_init(&attrs->attr);
+
+ return attrs;
+}
+
+static int versal_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct versal_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int err, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration");
+ return ret;
+ }
+
+ mgr = fpga_mgr_create(dev, "Xilinx Versal FPGA Manager",
+ &versal_fpga_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mgr);
+
+ err = fpga_mgr_register(mgr);
+ if (err) {
+ dev_err(dev, "unable to register FPGA manager");
+ fpga_mgr_free(mgr);
+ return err;
+ }
+
+ priv->source_attr = versal_fpga_create_sysfs_entry(&mgr->dev, "source",
+ 0200);
+ if (!priv->source_attr) {
+ dev_err(dev, "unable to create source sysfs attribute");
+ fpga_mgr_unregister(mgr);
+ fpga_mgr_free(mgr);
+ return -ENOMEM;
+ }
+
+ return device_create_file(&mgr->dev, priv->source_attr);
+}
+
+static int versal_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct versal_fpga_priv *priv = mgr->priv;
+
+ device_remove_file(&mgr->dev, priv->source_attr);
+ fpga_mgr_unregister(mgr);
+ fpga_mgr_free(mgr);
+
+ return 0;
+}
+
+static const struct of_device_id versal_fpga_of_match[] = {
+ { .compatible = "xlnx,versal-fpga", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, versal_fpga_of_match);
+
+static struct platform_driver versal_fpga_driver = {
+ .probe = versal_fpga_probe,
+ .remove = versal_fpga_remove,
+ .driver = {
+ .name = "versal_fpga_manager",
+ .of_match_table = of_match_ptr(versal_fpga_of_match),
+ },
+};
+
+module_platform_driver(versal_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_AUTHOR("Appana Durga Kedareswara rao <appanad.durga.rao@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Versal FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/xilinx-afi.c b/drivers/fpga/xilinx-afi.c
new file mode 100644
index 000000000000..ae3caf9849df
--- /dev/null
+++ b/drivers/fpga/xilinx-afi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA AFI bridge.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/**
+ * struct afi_fpga - AFI register description
+ * @value: value to be written to the register
+ * @regid: Register id for the register to be written
+ */
+struct afi_fpga {
+ u32 value;
+ u32 regid;
+};
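+
+/*
+ * Illustrative device tree fragment (the register IDs and values are
+ * examples only; see the xlnx,afi-fpga binding for the valid IDs):
+ *
+ *	afi0 {
+ *		compatible = "xlnx,afi-fpga";
+ *		config-afi = <0 0x0>, <1 0x0>;
+ *	};
+ */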
+
+static int afi_fpga_probe(struct platform_device *pdev)
+{
+ struct afi_fpga *afi_fpga;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int i, entries, pairs;
+ u32 reg, val;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ afi_fpga = devm_kzalloc(&pdev->dev, sizeof(*afi_fpga), GFP_KERNEL);
+ if (!afi_fpga)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, afi_fpga);
+
+ entries = of_property_count_u32_elems(np, "config-afi");
+	if (entries <= 0 || (entries % 2)) {
+ dev_err(&pdev->dev, "Invalid number of registers\n");
+ return -EINVAL;
+ }
+ pairs = entries / 2;
+
+ for (i = 0; i < pairs; i++) {
+ ret = of_property_read_u32_index(np, "config-afi", i * 2,
+ &reg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read register\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32_index(np, "config-afi", i * 2 + 1,
+ &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read value\n");
+ return -EINVAL;
+ }
+ ret = eemi_ops->ioctl(0, IOCTL_AFI, reg, val, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "AFI register write error %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id afi_fpga_ids[] = {
+ { .compatible = "xlnx,afi-fpga" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, afi_fpga_ids);
+
+static struct platform_driver afi_fpga_driver = {
+ .driver = {
+ .name = "afi-fpga",
+ .of_match_table = afi_fpga_ids,
+ },
+ .probe = afi_fpga_probe,
+};
+module_platform_driver(afi_fpga_driver);
+
+MODULE_DESCRIPTION("FPGA afi module");
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/zynq-afi.c b/drivers/fpga/zynq-afi.c
new file mode 100644
index 000000000000..7ce0d089e878
--- /dev/null
+++ b/drivers/fpga/zynq-afi.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA AFI driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Registers and special values for doing register-based operations */
+#define AFI_RDCHAN_CTRL_OFFSET 0x00
+#define AFI_WRCHAN_CTRL_OFFSET 0x14
+
+#define AFI_BUSWIDTH_MASK 0x01
+
+/**
+ * struct zynq_afi_fpga - AFI register description
+ * @membase: Base address of the AFI registers
+ * @afi_width: AFI bus width to be written
+ */
+struct zynq_afi_fpga {
+ void __iomem *membase;
+ u32 afi_width;
+};
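+
+/*
+ * Illustrative device tree fragment (the address is an example only;
+ * see the xlnx,zynq-afi-fpga binding):
+ *
+ *	afi0: afi@f8008000 {
+ *		compatible = "xlnx,zynq-afi-fpga";
+ *		reg = <0xf8008000 0x1000>;
+ *		xlnx,afi-width = <1>;
+ *	};
+ */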
+
+static int zynq_afi_fpga_probe(struct platform_device *pdev)
+{
+ struct zynq_afi_fpga *afi_fpga;
+ struct resource *res;
+	u32 reg_val;
+	int ret;
+
+ afi_fpga = devm_kzalloc(&pdev->dev, sizeof(*afi_fpga), GFP_KERNEL);
+ if (!afi_fpga)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ afi_fpga->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(afi_fpga->membase))
+ return PTR_ERR(afi_fpga->membase);
+
+	ret = device_property_read_u32(&pdev->dev, "xlnx,afi-width",
+				       &afi_fpga->afi_width);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to get the AFI bus width\n");
+		return -EINVAL;
+	}
+
+ reg_val = readl(afi_fpga->membase + AFI_RDCHAN_CTRL_OFFSET);
+ reg_val &= ~AFI_BUSWIDTH_MASK;
+ writel(reg_val | afi_fpga->afi_width,
+ afi_fpga->membase + AFI_RDCHAN_CTRL_OFFSET);
+ reg_val = readl(afi_fpga->membase + AFI_WRCHAN_CTRL_OFFSET);
+ reg_val &= ~AFI_BUSWIDTH_MASK;
+ writel(reg_val | afi_fpga->afi_width,
+ afi_fpga->membase + AFI_WRCHAN_CTRL_OFFSET);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_afi_fpga_ids[] = {
+ { .compatible = "xlnx,zynq-afi-fpga" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, zynq_afi_fpga_ids);
+
+static struct platform_driver zynq_afi_fpga_driver = {
+ .driver = {
+ .name = "zynq-afi-fpga",
+ .of_match_table = zynq_afi_fpga_ids,
+ },
+ .probe = zynq_afi_fpga_probe,
+};
+module_platform_driver(zynq_afi_fpga_driver);
+
+MODULE_DESCRIPTION("ZYNQ FPGA AFI module");
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..894cc36349fd 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2016 - 2019 Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/io.h>
@@ -10,19 +11,73 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/string.h>
+#include <linux/seq_file.h>
#include <linux/firmware/xlnx-zynqmp.h>
/* Constant Definitions */
#define IXR_FPGA_DONE_MASK BIT(3)
+#define IXR_FPGA_ENCRYPTION_EN 0x00000008U
+
+#define READ_DMA_SIZE 0x200
+#define DUMMY_FRAMES_SIZE 0x64
+#define PCAP_READ_CLKFREQ 25000000
+
+static bool readback_type;
+module_param(readback_type, bool, 0644);
+MODULE_PARM_DESC(readback_type,
+ "readback_type 0-configuration register read "
+ "1- configuration data read (default: 0)");
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct zynqmp_configreg - Configuration register offsets
+ * @reg: Name of the configuration register.
+ * @offset: Register offset.
+ */
+struct zynqmp_configreg {
+ char *reg;
+ u32 offset;
+};
+
+static struct zynqmp_configreg cfgreg[] = {
+ {.reg = "CRC", .offset = 0},
+ {.reg = "FAR", .offset = 1},
+ {.reg = "FDRI", .offset = 2},
+ {.reg = "FDRO", .offset = 3},
+ {.reg = "CMD", .offset = 4},
+ {.reg = "CTRL0", .offset = 5},
+ {.reg = "MASK", .offset = 6},
+ {.reg = "STAT", .offset = 7},
+ {.reg = "LOUT", .offset = 8},
+ {.reg = "COR0", .offset = 9},
+ {.reg = "MFWR", .offset = 10},
+ {.reg = "CBC", .offset = 11},
+ {.reg = "IDCODE", .offset = 12},
+ {.reg = "AXSS", .offset = 13},
+ {.reg = "COR1", .offset = 14},
+ {.reg = "WBSTR", .offset = 16},
+ {.reg = "TIMER", .offset = 17},
+ {.reg = "BOOTSTS", .offset = 22},
+ {.reg = "CTRL1", .offset = 24},
+ {}
+};
/**
* struct zynqmp_fpga_priv - Private data structure
* @dev: Device data structure
+ * @lock: Mutex lock for device
+ * @clk: Clock resource for pcap controller
* @flags: flags which is used to identify the bitfile type
+ * @size: Size of the Bitstream used for readback
*/
struct zynqmp_fpga_priv {
struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+ char *key;
u32 flags;
+ u32 size;
};
static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
@@ -33,40 +88,62 @@ static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
priv = mgr->priv;
priv->flags = info->flags;
+ priv->key = info->key;
return 0;
}
static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
- const char *buf, size_t size)
+ const char *buf, size_t size)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_fpga_priv *priv;
- dma_addr_t dma_addr;
- u32 eemi_flags = 0;
char *kbuf;
+ size_t dma_size;
+ dma_addr_t dma_addr;
int ret;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
+ if (!eemi_ops->fpga_load)
return -ENXIO;
priv = mgr->priv;
+ priv->size = size;
- kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
+ if (!mutex_trylock(&priv->lock))
+ return -EBUSY;
- memcpy(kbuf, buf, size);
+ ret = clk_enable(priv->clk);
+ if (ret)
+ goto err_unlock;
- wmb(); /* ensure all writes are done before initiate FW call */
+ if (priv->flags & IXR_FPGA_ENCRYPTION_EN)
+ dma_size = size + ENCRYPTED_KEY_LEN;
+ else
+ dma_size = size;
- if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
- eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
+ kbuf = dma_alloc_coherent(priv->dev, dma_size, &dma_addr, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ memcpy(kbuf, buf, size);
+
+ if (priv->flags & IXR_FPGA_ENCRYPTION_EN)
+ memcpy(kbuf + size, priv->key, ENCRYPTED_KEY_LEN);
- dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+	wmb(); /* ensure all writes are done before initiating the FW call */
+ if (priv->flags & IXR_FPGA_ENCRYPTION_EN)
+ ret = eemi_ops->fpga_load(dma_addr, dma_addr + size,
+ priv->flags);
+ else
+ ret = eemi_ops->fpga_load(dma_addr, size, priv->flags);
+
+ dma_free_coherent(priv->dev, dma_size, kbuf, dma_addr);
+disable_clk:
+ clk_disable(priv->clk);
+err_unlock:
+ mutex_unlock(&priv->lock);
return ret;
}
@@ -78,10 +155,9 @@ static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 status;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
+ if (!eemi_ops->fpga_get_status)
return FPGA_MGR_STATE_UNKNOWN;
eemi_ops->fpga_get_status(&status);
@@ -91,25 +167,152 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+static int zynqmp_fpga_read_cfgreg(struct fpga_manager *mgr,
+ struct seq_file *s)
+{
+ struct zynqmp_fpga_priv *priv = mgr->priv;
+ int ret, val;
+ unsigned int *buf;
+ dma_addr_t dma_addr;
+ struct zynqmp_configreg *p = cfgreg;
+
+ ret = clk_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ buf = dma_alloc_coherent(mgr->dev.parent, READ_DMA_SIZE,
+ &dma_addr, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ seq_puts(s, "zynqMP FPGA Configuration register contents are\n");
+
+ while (p->reg) {
+ ret = eemi_ops->fpga_read(p->offset, dma_addr, readback_type,
+ &val);
+ if (ret)
+ goto free_dmabuf;
+ seq_printf(s, "%s --> \t %x \t\r\n", p->reg, val);
+ p++;
+ }
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, READ_DMA_SIZE, buf,
+ dma_addr);
+disable_clk:
+ clk_disable(priv->clk);
+
+ return ret;
+}
+
+static int zynqmp_fpga_read_cfgdata(struct fpga_manager *mgr,
+ struct seq_file *s)
+{
+ struct zynqmp_fpga_priv *priv;
+ int ret, data_offset;
+ unsigned int *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+ int clk_rate;
+
+ priv = mgr->priv;
+ size = priv->size + READ_DMA_SIZE + DUMMY_FRAMES_SIZE;
+
+	/*
+	 * There is no h/w flow control for the PCAP read path, so to
+	 * prevent the FIFO from overflowing, reduce the PCAP operating
+	 * frequency.
+	 */
+ clk_rate = clk_get_rate(priv->clk);
+ clk_unprepare(priv->clk);
+ ret = clk_set_rate(priv->clk, PCAP_READ_CLKFREQ);
+ if (ret) {
+ dev_err(&mgr->dev, "Unable to reduce the PCAP freq %d\n", ret);
+ goto prepare_clk;
+ }
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&mgr->dev, "Cannot enable clock.\n");
+ goto restore_pcap_clk;
+ }
+
+ buf = dma_alloc_coherent(mgr->dev.parent, size, &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ seq_puts(s, "zynqMP FPGA Configuration data contents are\n");
+ ret = eemi_ops->fpga_read((priv->size + DUMMY_FRAMES_SIZE) / 4,
+ dma_addr, readback_type, &data_offset);
+ if (ret)
+ goto free_dmabuf;
+
+ seq_write(s, &buf[data_offset], priv->size);
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, size, buf, dma_addr);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+restore_pcap_clk:
+ clk_set_rate(priv->clk, clk_rate);
+prepare_clk:
+ clk_prepare(priv->clk);
+
+ return ret;
+}
+
+static int zynqmp_fpga_ops_read(struct fpga_manager *mgr, struct seq_file *s)
+{
+ struct zynqmp_fpga_priv *priv = mgr->priv;
+ int ret;
+
+ if (!eemi_ops->fpga_read)
+ return -ENXIO;
+
+ if (!mutex_trylock(&priv->lock))
+ return -EBUSY;
+
+ if (readback_type)
+ ret = zynqmp_fpga_read_cfgdata(mgr, s);
+ else
+ ret = zynqmp_fpga_read_cfgreg(mgr, s);
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
static const struct fpga_manager_ops zynqmp_fpga_ops = {
.state = zynqmp_fpga_ops_state,
.write_init = zynqmp_fpga_ops_write_init,
.write = zynqmp_fpga_ops_write,
.write_complete = zynqmp_fpga_ops_write_complete,
+ .read = zynqmp_fpga_ops_read,
};
static int zynqmp_fpga_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_fpga_priv *priv;
- struct fpga_manager *mgr;
int ret;
+ struct fpga_manager *mgr;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
+ mutex_init(&priv->lock);
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (ret < 0)
+ dev_err(dev, "no usable DMA configuration");
mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
&zynqmp_fpga_ops, priv);
@@ -118,9 +321,23 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mgr);
+ priv->clk = devm_clk_get(dev, "ref_clk");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(dev, "failed to to get pcp ref_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare(priv->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
ret = fpga_mgr_register(mgr);
if (ret) {
dev_err(dev, "unable to register FPGA manager");
+ clk_unprepare(priv->clk);
return ret;
}
@@ -129,9 +346,14 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
static int zynqmp_fpga_remove(struct platform_device *pdev)
{
- struct fpga_manager *mgr = platform_get_drvdata(pdev);
+ struct zynqmp_fpga_priv *priv;
+ struct fpga_manager *mgr;
+
+ mgr = platform_get_drvdata(pdev);
+ priv = mgr->priv;
fpga_mgr_unregister(mgr);
+ clk_unprepare(priv->clk);
return 0;
}
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 0852d8d5c389..906b67a93fa8 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -10,20 +10,32 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
/* Register Offset Definitions */
-#define XGPIO_DATA_OFFSET (0x0) /* Data register */
-#define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */
+#define XGPIO_DATA_OFFSET 0x0 /* Data register */
+#define XGPIO_TRI_OFFSET 0x4 /* I/O direction register */
+#define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */
+#define XGPIO_GIER_IE BIT(31)
+
+#define XGPIO_IPISR_OFFSET 0x120 /* IP Interrupt Status */
+#define XGPIO_IPIER_OFFSET 0x128 /* IP Interrupt Enable */
#define XGPIO_CHANNEL_OFFSET 0x8
/* Read/Write access to the GPIO registers */
-#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARM64)
# define xgpio_readreg(offset) readl(offset)
# define xgpio_writereg(offset, val) writel(val, offset)
#else
@@ -34,43 +46,31 @@
/**
* struct xgpio_instance - Stores information about GPIO device
* @mmchip: OF GPIO chip for memory mapped banks
- * @gpio_width: GPIO width for every channel
+ * @mmchip_dual: Pointer to the OF dual gpio chip
* @gpio_state: GPIO state shadow register
* @gpio_dir: GPIO direction shadow register
+ * @offset: GPIO channel offset
+ * @irq_base: First Linux irq number allocated to the GPIO channel
+ * @irq_enable: GPIO irq enable/disable bitfield
+ * @no_init: No initialisation at probe
* @gpio_lock: Lock used for synchronization
+ * @irq_domain: irq_domain of the controller
+ * @clk: clock resource for this driver
*/
struct xgpio_instance {
struct of_mm_gpio_chip mmchip;
- unsigned int gpio_width[2];
- u32 gpio_state[2];
- u32 gpio_dir[2];
- spinlock_t gpio_lock[2];
+ struct of_mm_gpio_chip *mmchip_dual;
+ u32 gpio_state;
+ u32 gpio_dir;
+ u32 offset;
+ int irq_base;
+ u32 irq_enable;
+ bool no_init;
+ spinlock_t gpio_lock;
+ struct irq_domain *irq_domain;
+ struct clk *clk;
};
-static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
-{
- if (gpio >= chip->gpio_width[0])
- return 1;
-
- return 0;
-}
-
-static inline int xgpio_regoffset(struct xgpio_instance *chip, int gpio)
-{
- if (xgpio_index(chip, gpio))
- return XGPIO_CHANNEL_OFFSET;
-
- return 0;
-}
-
-static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
-{
- if (xgpio_index(chip, gpio))
- return gpio - chip->gpio_width[0];
-
- return gpio;
-}
-
/**
* xgpio_get - Read the specified signal of the GPIO device.
* @gc: Pointer to gpio_chip device structure.
@@ -85,13 +85,12 @@ static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- u32 val;
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
- val = xgpio_readreg(mm_gc->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio));
+ void __iomem *regs = mm_gc->regs + chip->offset;
- return !!(val & BIT(xgpio_offset(chip, gpio)));
+ return !!(xgpio_readreg(regs + XGPIO_DATA_OFFSET) & BIT(gpio));
}
/**
@@ -107,22 +106,22 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
if (val)
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(gpio);
else
- chip->gpio_state[index] &= ~BIT(offset);
+ chip->gpio_state &= ~BIT(gpio);
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -139,38 +138,29 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
{
unsigned long flags;
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, 0);
- int offset, i;
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
+ int i;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signals */
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
break;
- /* Once finished with an index write it out to the register */
- if (index != xgpio_index(chip, i)) {
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
- index = xgpio_index(chip, i);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
- }
if (__test_and_clear_bit(i, mask)) {
- offset = xgpio_offset(chip, i);
if (test_bit(i, bits))
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(i);
else
- chip->gpio_state[index] &= ~BIT(offset);
+ chip->gpio_state &= ~BIT(i);
}
}
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -178,6 +168,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
*
+ * This function sets the direction of the specified GPIO signal to input.
+ *
* Return:
* 0 - if direction of GPIO signals is set as input
* otherwise it returns negative error value.
@@ -186,18 +178,17 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
- chip->gpio_dir[index] |= BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ chip->gpio_dir |= BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -218,26 +209,25 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
if (val)
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(gpio);
else
- chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ chip->gpio_state &= ~BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
/* Clear the GPIO bit in shadow register and set direction as output */
- chip->gpio_dir[index] &= ~BIT(offset);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ chip->gpio_dir &= ~BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -249,20 +239,321 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct xgpio_instance *chip =
- container_of(mm_gc, struct xgpio_instance, mmchip);
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ if (chip->no_init) {
+ chip->gpio_state = xgpio_readreg(mm_gc->regs +
+ XGPIO_DATA_OFFSET);
+ chip->gpio_dir = xgpio_readreg(mm_gc->regs + XGPIO_TRI_OFFSET);
+ } else {
+ xgpio_writereg(mm_gc->regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
+ xgpio_writereg(mm_gc->regs + chip->offset + XGPIO_TRI_OFFSET,
+ chip->gpio_dir);
+ }
+}
+
+/**
+ * xgpio_xlate - Translate gpio_spec to the GPIO number and flags
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpiospec: gpio specifier as found in the device tree
+ * @flags: A flags pointer based on binding
+ *
+ * Return:
+ * GPIO number within the channel on success, otherwise -EINVAL
+ */
+static int xgpio_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance,
+ mmchip);
+ if (gc->of_gpio_n_cells == 3 && flags)
+ *flags = gpiospec->args[2];
+
+ if (gpiospec->args[1] == chip->offset)
+ return gpiospec->args[0];
+
+ return -EINVAL;
+}
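+
+/*
+ * Illustrative consumer fragment for the three-cell case (names are
+ * examples): the first cell is the pin, the second selects the channel
+ * by register offset (0 or 8), the third carries the GPIO flags:
+ *	gpios = <&axi_gpio_0 2 8 GPIO_ACTIVE_HIGH>;
+ */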
+
+/**
+ * xgpio_irq_mask - Mask (disable) the specified GPIO interrupt.
+ * @irq_data: per irq and chip data passed down to chip functions
+ */
+static void xgpio_irq_mask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ struct of_mm_gpio_chip *mm_gc = &chip->mmchip;
+ u32 offset = irq_data->irq - chip->irq_base;
+ u32 temp;
+
+ pr_debug("%s: Disable %d irq, irq_enable_mask 0x%x\n",
+ __func__, offset, chip->irq_enable);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable &= ~BIT(offset);
+
+ if (!chip->irq_enable) {
+		/* Disable the per-channel interrupt */
+		temp = xgpio_readreg(mm_gc->regs + XGPIO_IPIER_OFFSET);
+		temp &= ~(chip->offset / XGPIO_CHANNEL_OFFSET + 1);
+ xgpio_writereg(mm_gc->regs + XGPIO_IPIER_OFFSET, temp);
+
+ /* Disable global interrupt if channel interrupts are unused */
+ temp = xgpio_readreg(mm_gc->regs + XGPIO_IPIER_OFFSET);
+ if (!temp)
+ xgpio_writereg(mm_gc->regs + XGPIO_GIER_OFFSET,
+ ~XGPIO_GIER_IE);
+
+ }
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_irq_unmask - Unmask (enable) the specified GPIO interrupt.
+ * @irq_data: per irq and chip data passed down to chip functions
+ */
+static void xgpio_irq_unmask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ struct of_mm_gpio_chip *mm_gc = &chip->mmchip;
+ u32 offset = irq_data->irq - chip->irq_base;
+ u32 temp;
+
+ pr_debug("%s: Enable %d irq, irq_enable_mask 0x%x\n",
+ __func__, offset, chip->irq_enable);
+
+ /* Setup pin as input */
+ xgpio_dir_in(&mm_gc->gc, offset);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable |= BIT(offset);
+
+ if (chip->irq_enable) {
+
+ /* Enable per channel interrupt */
+ temp = xgpio_readreg(mm_gc->regs + XGPIO_IPIER_OFFSET);
+ temp |= chip->offset / XGPIO_CHANNEL_OFFSET + 1;
+ xgpio_writereg(mm_gc->regs + XGPIO_IPIER_OFFSET, temp);
+
+ /* Enable global interrupts */
+ xgpio_writereg(mm_gc->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE);
+ }
+
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_set_irq_type - Set the interrupt type for a GPIO signal.
+ * @irq_data: Per irq and chip data passed down to chip functions
+ * @type: Interrupt type that is to be set for the gpio pin
+ *
+ * Return:
+ * 0 if the interrupt type is supported, otherwise -EINVAL
+ */
+static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
+{
+ /* Only rising edge case is supported now */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* irq chip descriptor */
+static struct irq_chip xgpio_irqchip = {
+ .name = "xgpio",
+ .irq_mask = xgpio_irq_mask,
+ .irq_unmask = xgpio_irq_unmask,
+ .irq_set_type = xgpio_set_irq_type,
+};
+
+/**
+ * xgpio_to_irq - Find out gpio to Linux irq mapping
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: Gpio pin offset
+ *
+ * Return:
+ * Linux irq number for the GPIO pin, or 0 if no mapping exists
+ */
+static int xgpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance,
+ mmchip);
+
+ return irq_find_mapping(chip->irq_domain, offset);
+}
+
+/**
+ * xgpio_irqhandler - Gpio interrupt service routine
+ * @desc: Pointer to interrupt description
+ */
+static void xgpio_irqhandler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+
+ struct xgpio_instance *chip = (struct xgpio_instance *)
+ irq_get_handler_data(irq);
+ struct of_mm_gpio_chip *mm_gc = &chip->mmchip;
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ int offset;
+ unsigned long val;
+
+ chained_irq_enter(irqchip, desc);
+
+ val = xgpio_readreg(mm_gc->regs + chip->offset);
+ /* Only rising edge is supported */
+ val &= chip->irq_enable;
+
+ for_each_set_bit(offset, &val, chip->mmchip.gc.ngpio) {
+ generic_handle_irq(chip->irq_base + offset);
+ }
+
+ xgpio_writereg(mm_gc->regs + XGPIO_IPISR_OFFSET,
+ chip->offset / XGPIO_CHANNEL_OFFSET + 1);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
+
+/**
+ * xgpio_irq_setup - Allocate irq for gpio and setup appropriate functions
+ * @np: Device node of the GPIO chip
+ * @chip: Pointer to private gpio channel structure
+ *
+ * Return:
+ * 0 if success, otherwise -1
+ */
+static int xgpio_irq_setup(struct device_node *np, struct xgpio_instance *chip)
+{
+ u32 pin_num;
+ struct resource res;
+
+ int ret = of_irq_to_resource(np, 0, &res);
+
+ if (ret <= 0) {
+ pr_info("GPIO IRQ not connected\n");
+ return 0;
+ }
+
+ chip->mmchip.gc.to_irq = xgpio_to_irq;
+
+ chip->irq_base = irq_alloc_descs(-1, 0, chip->mmchip.gc.ngpio, 0);
+ if (chip->irq_base < 0) {
+ pr_err("Couldn't allocate IRQ numbers\n");
+ return -1;
+ }
+ chip->irq_domain = irq_domain_add_legacy(np, chip->mmchip.gc.ngpio,
+ chip->irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
+ /*
+ * set the irq chip, handler and irq chip data for callbacks for
+ * each pin
+ */
+ for (pin_num = 0; pin_num < chip->mmchip.gc.ngpio; pin_num++) {
+ u32 gpio_irq = irq_find_mapping(chip->irq_domain, pin_num);
+
+ irq_set_lockdep_class(gpio_irq, &gpio_lock_class,
+ &gpio_request_class);
+ pr_debug("IRQ Base: %d, Pin %d = IRQ %d\n",
+ chip->irq_base, pin_num, gpio_irq);
+ irq_set_chip_and_handler(gpio_irq, &xgpio_irqchip,
+ handle_simple_irq);
+ irq_set_chip_data(gpio_irq, (void *)chip);
+ }
+ irq_set_handler_data(res.start, (void *)chip);
+ irq_set_chained_handler(res.start, xgpio_irqhandler);
+
+ return 0;
+}
+
+static int xgpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret = pm_runtime_get_sync(chip->parent);
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
+ /*
+ * If the device is already active pm_runtime_get() will return 1 on
+ * success, but gpio_request still needs to return 0.
+ */
+ return ret < 0 ? ret : 0;
+}
- if (!chip->gpio_width[1])
- return;
+static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pm_runtime_put(chip->parent);
+}
- xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[1]);
- xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_dir[1]);
+static int __maybe_unused xgpio_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+ struct irq_data *data;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_dbg(dev, "failed to get IRQ\n");
+ return 0;
+ }
+
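+ /* Keep the controller powered if its IRQ is armed as a wakeup source */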
+ data = irq_get_irq_data(irq);
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
}
+static int __maybe_unused xgpio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+ struct irq_data *data;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_dbg(dev, "failed to get IRQ\n");
+ return 0;
+ }
+
+ data = irq_get_irq_data(irq);
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable(gpio->clk);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ return clk_enable(gpio->clk);
+}
+
+static const struct dev_pm_ops xgpio_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
+ xgpio_runtime_resume, NULL)
+};
+
/**
* xgpio_remove - Remove method for the GPIO device.
* @pdev: pointer to the platform device
@@ -276,118 +567,225 @@ static int xgpio_remove(struct platform_device *pdev)
struct xgpio_instance *chip = platform_get_drvdata(pdev);
of_mm_gpiochip_remove(&chip->mmchip);
+ if (chip->mmchip_dual)
+ of_mm_gpiochip_remove(chip->mmchip_dual);
+ if (!pm_runtime_suspended(&pdev->dev))
+ clk_disable(chip->clk);
+ clk_unprepare(chip->clk);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
/**
* xgpio_of_probe - Probe method for the GPIO device.
- * @pdev: pointer to the platform device
+ * @pdev: platform device instance
+ *
+ * This function probes the GPIO device in the device tree, initializes the
+ * driver data structures, and registers the GPIO chip(s).
*
* Return:
* It returns 0, if the driver is bound to the GPIO device, or
* a negative value if there is an error.
*/
-static int xgpio_probe(struct platform_device *pdev)
+static int xgpio_of_probe(struct platform_device *pdev)
{
- struct xgpio_instance *chip;
- int status = 0;
struct device_node *np = pdev->dev.of_node;
- u32 is_dual;
+ struct xgpio_instance *chip, *chip_dual;
+ int status = 0;
+ const u32 *tree_info;
+ u32 ngpio;
+ u32 cells = 2;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- platform_set_drvdata(pdev, chip);
-
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]);
+ of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state);
+
+ /* By default, all pins are inputs */
+ chip->gpio_dir = 0xFFFFFFFF;
/* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
- chip->gpio_dir[0] = 0xFFFFFFFF;
+ of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir);
+
+ chip->no_init = of_property_read_bool(np, "xlnx,no-init");
+
+ /* Update cells with gpio-cells value */
+ of_property_read_u32(np, "#gpio-cells", &cells);
/*
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
- chip->gpio_width[0] = 32;
-
- spin_lock_init(&chip->gpio_lock[0]);
-
- if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
- is_dual = 0;
-
- if (is_dual) {
- /* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state[1]);
-
- /* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default-2",
- &chip->gpio_dir[1]))
- chip->gpio_dir[1] = 0xFFFFFFFF;
-
- /*
- * Check device node and parent device node for device width
- * and assume default width of 32
- */
- if (of_property_read_u32(np, "xlnx,gpio2-width",
- &chip->gpio_width[1]))
- chip->gpio_width[1] = 32;
+ if (of_property_read_u32(np, "xlnx,gpio-width", &ngpio))
+ ngpio = 32;
+ chip->mmchip.gc.ngpio = (u16)ngpio;
- spin_lock_init(&chip->gpio_lock[1]);
- }
+ spin_lock_init(&chip->gpio_lock);
- chip->mmchip.gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
chip->mmchip.gc.parent = &pdev->dev;
+ chip->mmchip.gc.owner = THIS_MODULE;
+ chip->mmchip.gc.of_xlate = xgpio_xlate;
+ chip->mmchip.gc.of_gpio_n_cells = cells;
chip->mmchip.gc.direction_input = xgpio_dir_in;
chip->mmchip.gc.direction_output = xgpio_dir_out;
chip->mmchip.gc.get = xgpio_get;
chip->mmchip.gc.set = xgpio_set;
+ chip->mmchip.gc.request = xgpio_request;
+ chip->mmchip.gc.free = xgpio_free;
chip->mmchip.gc.set_multiple = xgpio_set_multiple;
chip->mmchip.save_regs = xgpio_save_regs;
+ platform_set_drvdata(pdev, chip);
+
+ chip->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(chip->clk)) {
+ if (PTR_ERR(chip->clk) != -ENOENT) {
+ if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Input clock not found\n");
+ return PTR_ERR(chip->clk);
+ }
+
+ /*
+ * Clock framework support is optional; continue anyway
+ * if no matching clock is found.
+ */
+ chip->clk = NULL;
+ }
+
+ status = clk_prepare_enable(chip->clk);
+ if (status < 0) {
+ dev_err(&pdev->dev, "Failed to prepare clk\n");
+ return status;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
/* Call the OF gpio helper to setup and register the GPIO device */
- status = of_mm_gpiochip_add_data(np, &chip->mmchip, chip);
+ status = of_mm_gpiochip_add(np, &chip->mmchip);
if (status) {
pr_err("%pOF: error in probe function with status %d\n",
np, status);
- return status;
+ goto err_unprepare_clk;
}
+ status = xgpio_irq_setup(np, chip);
+ if (status) {
+ pr_err("%s: GPIO IRQ initialization failed %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+
+ pr_info("XGpio: %s: registered, base is %d\n", np->full_name,
+ chip->mmchip.gc.base);
+
+ tree_info = of_get_property(np, "xlnx,is-dual", NULL);
+ if (tree_info && be32_to_cpup(tree_info)) {
+ chip_dual = devm_kzalloc(&pdev->dev, sizeof(*chip_dual),
+ GFP_KERNEL);
+ if (!chip_dual) {
+ status = -ENOMEM;
+ goto err_pm_put;
+ }
+
+ /* Add dual channel offset */
+ chip_dual->offset = XGPIO_CHANNEL_OFFSET;
+
+ /* Update GPIO state shadow register with default value */
+ of_property_read_u32(np, "xlnx,dout-default-2",
+ &chip_dual->gpio_state);
+
+ /* By default, all pins are inputs */
+ chip_dual->gpio_dir = 0xFFFFFFFF;
+
+ /* Update GPIO direction shadow register with default value */
+ of_property_read_u32(np, "xlnx,tri-default-2",
+ &chip_dual->gpio_dir);
+
+ /*
+ * Check device node and parent device node for device width
+ * and assume default width of 32
+ */
+ if (of_property_read_u32(np, "xlnx,gpio2-width", &ngpio))
+ ngpio = 32;
+ chip_dual->mmchip.gc.ngpio = (u16)ngpio;
+
+ spin_lock_init(&chip_dual->gpio_lock);
+
+ chip_dual->mmchip.gc.parent = &pdev->dev;
+ chip_dual->mmchip.gc.owner = THIS_MODULE;
+ chip_dual->mmchip.gc.of_xlate = xgpio_xlate;
+ chip_dual->mmchip.gc.of_gpio_n_cells = cells;
+ chip_dual->mmchip.gc.direction_input = xgpio_dir_in;
+ chip_dual->mmchip.gc.direction_output = xgpio_dir_out;
+ chip_dual->mmchip.gc.get = xgpio_get;
+ chip_dual->mmchip.gc.set = xgpio_set;
+ chip_dual->mmchip.gc.request = xgpio_request;
+ chip_dual->mmchip.gc.free = xgpio_free;
+ chip_dual->mmchip.gc.set_multiple = xgpio_set_multiple;
+
+ chip_dual->mmchip.save_regs = xgpio_save_regs;
+
+ chip->mmchip_dual = &chip_dual->mmchip;
+
+ status = xgpio_irq_setup(np, chip_dual);
+ if (status) {
+ pr_err("%s: GPIO IRQ initialization failed %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+
+ /* Call the OF gpio helper to setup and register the GPIO dev */
+ status = of_mm_gpiochip_add(np, &chip_dual->mmchip);
+ if (status) {
+ pr_err("%s: error in probe function with status %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+ pr_info("XGpio: %s: dual channel registered, base is %d\n",
+ np->full_name, chip_dual->mmchip.gc.base);
+ }
+
+ pm_runtime_put(&pdev->dev);
return 0;
+
+err_pm_put:
+ pm_runtime_put(&pdev->dev);
+err_unprepare_clk:
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(chip->clk);
+ return status;
}
static const struct of_device_id xgpio_of_match[] = {
{ .compatible = "xlnx,xps-gpio-1.00.a", },
{ /* end of list */ },
};
-
MODULE_DEVICE_TABLE(of, xgpio_of_match);
-static struct platform_driver xgpio_plat_driver = {
- .probe = xgpio_probe,
- .remove = xgpio_remove,
- .driver = {
- .name = "gpio-xilinx",
- .of_match_table = xgpio_of_match,
+static struct platform_driver xilinx_gpio_driver = {
+ .probe = xgpio_of_probe,
+ .remove = xgpio_remove,
+ .driver = {
+ .name = "xilinx-gpio",
+ .of_match_table = xgpio_of_match,
+ .pm = &xgpio_dev_pm_ops,
},
};
static int __init xgpio_init(void)
{
- return platform_driver_register(&xgpio_plat_driver);
+ return platform_driver_register(&xilinx_gpio_driver);
}
+/* Make sure we get initialized before anyone else tries to use us */
subsys_initcall(xgpio_init);
static void __exit xgpio_exit(void)
{
- platform_driver_unregister(&xgpio_plat_driver);
+ platform_driver_unregister(&xilinx_gpio_driver);
}
module_exit(xgpio_exit);
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 211728c8ede2..2cdee9dd2381 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -10,6 +10,7 @@
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,6 +22,8 @@
/* Maximum banks */
#define ZYNQ_GPIO_MAX_BANK 4
#define ZYNQMP_GPIO_MAX_BANK 6
+#define VERSAL_GPIO_MAX_BANK 4
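+/* Banks 1 and 2 are not implemented on Versal; bank loops skip over them */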
+#define VERSAL_UNUSED_BANKS 2
#define ZYNQ_GPIO_BANK0_NGPIO 32
#define ZYNQ_GPIO_BANK1_NGPIO 22
@@ -95,6 +98,7 @@
/* set to differentiate zynq from zynqmp, 0=zynqmp, 1=zynq */
#define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0)
#define GPIO_QUIRK_DATA_RO_BUG BIT(1)
+#define GPIO_QUIRK_VERSAL BIT(2)
struct gpio_regs {
u32 datamsw[ZYNQMP_GPIO_MAX_BANK];
@@ -116,6 +120,7 @@ struct gpio_regs {
* @irq: interrupt for the GPIO device
* @p_data: pointer to platform data
* @context: context registers
+ * @dirlock: lock used for direction in/out synchronization
*/
struct zynq_gpio {
struct gpio_chip chip;
@@ -124,6 +129,7 @@ struct zynq_gpio {
int irq;
const struct zynq_platform_data *p_data;
struct gpio_regs context;
+ spinlock_t dirlock;
};
/**
@@ -196,6 +202,8 @@ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num,
gpio->p_data->bank_min[bank];
return;
}
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank = bank + VERSAL_UNUSED_BANKS;
}
/* default */
@@ -297,6 +305,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
{
u32 reg;
unsigned int bank_num, bank_pin_num;
+ unsigned long flags;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
@@ -310,9 +319,11 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
return -EINVAL;
/* clear the bit in direction mode reg to set the pin as input */
+ spin_lock_irqsave(&gpio->dirlock, flags);
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
reg &= ~BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ spin_unlock_irqrestore(&gpio->dirlock, flags);
return 0;
}
@@ -334,11 +345,13 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
{
u32 reg;
unsigned int bank_num, bank_pin_num;
+ unsigned long flags;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
/* set the GPIO pin as output */
+ spin_lock_irqsave(&gpio->dirlock, flags);
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
reg |= BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
@@ -347,6 +360,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
reg |= BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
+ spin_unlock_irqrestore(&gpio->dirlock, flags);
/* set the state of the pin */
zynq_gpio_set_value(chip, pin, state);
@@ -644,6 +658,8 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
int_enb = readl_relaxed(gpio->base_addr +
ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
zynq_gpio_handle_bank_irq(gpio, bank_num, int_sts & ~int_enb);
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
chained_irq_exit(irqchip, desc);
@@ -673,6 +689,8 @@ static void zynq_gpio_save_context(struct zynq_gpio *gpio)
gpio->context.int_any[bank_num] =
readl_relaxed(gpio->base_addr +
ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
}
@@ -704,6 +722,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
writel_relaxed(~(gpio->context.int_en[bank_num]),
gpio->base_addr +
ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
}
@@ -712,6 +732,9 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
+ if (!device_may_wakeup(dev))
+ disable_irq(gpio->irq);
+
if (!irqd_is_wakeup_set(data)) {
zynq_gpio_save_context(gpio);
return pm_runtime_force_suspend(dev);
@@ -726,6 +749,9 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
struct irq_data *data = irq_get_irq_data(gpio->irq);
int ret;
+ if (!device_may_wakeup(dev))
+ enable_irq(gpio->irq);
+
if (!irqd_is_wakeup_set(data)) {
ret = pm_runtime_force_resume(dev);
zynq_gpio_restore_context(gpio);
@@ -775,6 +801,17 @@ static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
zynq_gpio_runtime_resume, NULL)
};
+static const struct zynq_platform_data versal_gpio_def = {
+ .label = "versal_gpio",
+ .quirks = GPIO_QUIRK_VERSAL,
+ .ngpio = 58,
+ .max_bank = VERSAL_GPIO_MAX_BANK,
+ .bank_min[0] = 0,
+ .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */
+ .bank_min[3] = 26,
+ .bank_max[3] = 57, /* Bank 3 is connected to FMIOs (32 pins) */
+};
+
static const struct zynq_platform_data zynqmp_gpio_def = {
.label = "zynqmp_gpio",
.quirks = GPIO_QUIRK_DATA_RO_BUG,
@@ -812,6 +849,7 @@ static const struct zynq_platform_data zynq_gpio_def = {
static const struct of_device_id zynq_gpio_of_match[] = {
{ .compatible = "xlnx,zynq-gpio-1.0", .data = &zynq_gpio_def },
{ .compatible = "xlnx,zynqmp-gpio-1.0", .data = &zynqmp_gpio_def },
+ { .compatible = "xlnx,versal-gpio-1.0", .data = &versal_gpio_def },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynq_gpio_of_match);
@@ -883,6 +921,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
return ret;
}
+ spin_lock_init(&gpio->dirlock);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -897,9 +937,12 @@ static int zynq_gpio_probe(struct platform_device *pdev)
}
/* disable interrupts for all banks */
- for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++)
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
+ }
ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
handle_level_irq, IRQ_TYPE_NONE);
@@ -911,6 +954,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, gpio->irq,
zynq_gpio_irqhandler);
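+ /* Mask at the controller immediately on disable_irq() instead of lazily */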
+ irq_set_status_flags(gpio->irq, IRQ_DISABLE_UNLAZY);
+ device_init_wakeup(&pdev->dev, 1);
pm_runtime_put(&pdev->dev);
return 0;
@@ -963,7 +1008,7 @@ static int __init zynq_gpio_init(void)
{
return platform_driver_register(&zynq_gpio_driver);
}
-postcore_initcall(zynq_gpio_init);
+subsys_initcall(zynq_gpio_init);
static void __exit zynq_gpio_exit(void)
{
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 36f900d63979..7b764cce6028 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -307,6 +307,10 @@ source "drivers/gpu/drm/bridge/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
+source "drivers/gpu/drm/xilinx/Kconfig"
+
+source "drivers/gpu/drm/zocl/Kconfig"
+
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/v3d/Kconfig"
@@ -331,6 +335,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
+source "drivers/gpu/drm/xlnx/Kconfig"
+
source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 72f5036d9bfa..269c153f1f8a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -93,6 +93,8 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_XILINX) += xilinx/
+obj-$(CONFIG_DRM_ZOCL) += zocl/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/
@@ -107,6 +109,7 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_XLNX) += xlnx/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6ea55fb4526d..c6ed69ac7d0d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -224,6 +224,8 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XV15, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_XV20, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
@@ -274,6 +276,11 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
.is_yuv = true },
+ { .format = DRM_FORMAT_AVUY, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XVUY8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XVUY2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y8, .depth = 0, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y10, .depth = 0, .num_planes = 1, .pixels_per_macropixel = { 3, 0, 0 }, .bytes_per_macropixel = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
};
unsigned int i;
@@ -511,3 +518,38 @@ uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
drm_format_info_block_height(info, plane));
}
EXPORT_SYMBOL(drm_format_info_min_pitch);
+
+/**
+ * drm_format_plane_width_bytes - number of bytes for a plane row of given width
+ * @info: DRM format information
+ * @plane: plane index
+ * @width: width in pixels
+ *
+ * This returns the number of bytes that @width pixels occupy in @plane.
+ * Either the @cpp or the macro-pixel information of the format must be valid.
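+ *
+ * For macro-pixel formats the result is rounded up to a whole number of
+ * macro pixels. For example, XV15 packs 3 pixels into 4 bytes in plane 0,
+ * so a width of 1920 pixels needs DIV_ROUND_UP(1920 * 4, 3) = 2560 bytes.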
+ *
+ * Returns:
+ * The number of bytes for @width pixels of @plane, or 0 for invalid format info.
+ */
+int drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, int width)
+{
+ if (!info || plane >= info->num_planes)
+ return 0;
+
+ if (info->cpp[plane])
+ return info->cpp[plane] * width;
+
+ if (WARN_ON(!info->bytes_per_macropixel[plane] ||
+ !info->pixels_per_macropixel[plane])) {
+ struct drm_format_name_buf buf;
+
+ DRM_WARN("Either cpp or macro-pixel info should be valid: %s\n",
+ drm_get_format_name(info->format, &buf));
+ return 0;
+ }
+
+ return DIV_ROUND_UP(width * info->bytes_per_macropixel[plane],
+ info->pixels_per_macropixel[plane]);
+}
+EXPORT_SYMBOL(drm_format_plane_width_bytes);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 45f6f11a88a7..3eebc5eca7b2 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -290,7 +290,8 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS |
+ DRM_MODE_FB_ALTERNATE_TOP | DRM_MODE_FB_ALTERNATE_BOTTOM)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3a7410057c92..c3998bc6f968 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2976,6 +2976,34 @@ static const struct panel_desc_dsi auo_b080uan01 = {
.lanes = 4,
};
+static const struct drm_display_mode auo_b101uan01_mode = {
+ .clock = 154500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 185,
+ .hsync_end = 1920 + 185,
+ .htotal = 1920 + 185 + 925,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 3,
+ .vsync_end = 1200 + 3 + 5,
+ .vtotal = 1200 + 3 + 5 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b101uan01 = {
+ .desc = {
+ .modes = &auo_b101uan01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 272,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode boe_tv080wum_nl0_mode = {
.clock = 160000,
.hdisplay = 1200,
@@ -3124,6 +3152,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "auo,b080uan01",
.data = &auo_b080uan01
}, {
+ .compatible = "auo,b101uan01",
+ .data = &auo_b101uan01
+ }, {
.compatible = "boe,tv080wum-nl0",
.data = &boe_tv080wum_nl0
}, {
diff --git a/drivers/gpu/drm/xilinx/Kconfig b/drivers/gpu/drm/xilinx/Kconfig
new file mode 100644
index 000000000000..57e18a9d774d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/Kconfig
@@ -0,0 +1,59 @@
+config DRM_XILINX
+ tristate "Xilinx DRM"
+ depends on DRM && HAVE_CLK
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DMA_ENGINE
+ select DRM_XILINX_DP_SUB
+ select XILINX_FRMBUF
+ help
+ DRM display driver for Xilinx IP-based pipelines.
+
+config DRM_XILINX_DP
+ tristate "Xilinx DRM Display Port Driver"
+ depends on DRM_XILINX
+ help
+ DRM driver for Xilinx Display Port IP.
+
+config DRM_XILINX_DP_DEBUG_FS
+ bool "Xilinx DRM DP debugfs"
+ depends on DEBUG_FS && DRM_XILINX_DP
+ help
+ Enable the debugfs code for the DP driver. The debugfs code
+ enables debugging and testing related features. It exposes some
+ low-level controls to user space to help with test automation,
+ and can enable additional diagnostic or statistical
+ information.
+
+config DRM_XILINX_DP_SUB
+ tristate "Xilinx DRM Display Port Subsystem Driver"
+ depends on DRM_XILINX
+ select DRM_XILINX_DP
+ help
+ DRM driver for Xilinx Display Port Subsystem.
+
+config DRM_XILINX_DP_SUB_DEBUG_FS
+ bool "Xilinx DRM DPSUB debugfs"
+ depends on DEBUG_FS && DRM_XILINX_DP_SUB
+ select DRM_XILINX_DP_DEBUG_FS
+ help
+ Enable the debugfs code for the DP Sub driver. The debugfs code
+ enables debugging and testing related features. It exposes some
+ low-level controls to user space to help with test automation,
+ and can enable additional diagnostic or statistical
+ information.
+
+config DRM_XILINX_MIPI_DSI
+ tristate "Xilinx DRM MIPI DSI Driver"
+ depends on DRM_XILINX
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ DRM driver for Xilinx MIPI DSI IP.
+
+config DRM_XILINX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XILINX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
diff --git a/drivers/gpu/drm/xilinx/Makefile b/drivers/gpu/drm/xilinx/Makefile
new file mode 100644
index 000000000000..19bc1541ca17
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+xilinx_drm-y := xilinx_drm_crtc.o xilinx_drm_connector.o xilinx_drm_drv.o \
+ xilinx_drm_encoder.o xilinx_drm_fb.o xilinx_drm_gem.o \
+ xilinx_drm_plane.o
+xilinx_drm-y += xilinx_cresample.o xilinx_osd.o xilinx_rgb2yuv.o xilinx_vtc.o
+
+obj-$(CONFIG_DRM_XILINX) += xilinx_drm.o
+obj-$(CONFIG_DRM_XILINX_DP) += xilinx_drm_dp.o
+obj-$(CONFIG_DRM_XILINX_DP_SUB) += xilinx_drm_dp_sub.o
+obj-$(CONFIG_DRM_XILINX_MIPI_DSI) += xilinx_drm_dsi.o
+obj-$(CONFIG_DRM_XILINX_SDI) += xilinx_drm_sdi.o
diff --git a/drivers/gpu/drm/xilinx/xilinx_cresample.c b/drivers/gpu/drm/xilinx/xilinx_cresample.c
new file mode 100644
index 000000000000..6ddad66913ae
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_cresample.c
@@ -0,0 +1,154 @@
+/*
+ * Xilinx Chroma Resampler support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_cresample.h"
+
+/* registers */
+/* general control registers */
+#define CRESAMPLE_CONTROL 0x0000
+
+/* horizontal and vertical active frame size */
+#define CRESAMPLE_ACTIVE_SIZE 0x0020
+
+/* control register bit definition */
+#define CRESAMPLE_CTL_EN (1 << 0) /* enable */
+#define CRESAMPLE_CTL_RU (1 << 1) /* reg update */
+#define CRESAMPLE_CTL_RESET (1 << 31) /* instant reset */
+
+struct xilinx_cresample {
+ void __iomem *base;
+ const char *input_format_name;
+ const char *output_format_name;
+};
+
+/* enable cresample */
+void xilinx_cresample_enable(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg | CRESAMPLE_CTL_EN);
+}
+
+/* disable cresample */
+void xilinx_cresample_disable(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg & ~CRESAMPLE_CTL_EN);
+}
+
+/* configure cresample */
+void xilinx_cresample_configure(struct xilinx_cresample *cresample,
+ int hactive, int vactive)
+{
+ /* configure hsize and vsize */
+ xilinx_drm_writel(cresample->base, CRESAMPLE_ACTIVE_SIZE,
+ (vactive << 16) | hactive);
+}
+
+/* reset cresample */
+void xilinx_cresample_reset(struct xilinx_cresample *cresample)
+{
+ u32 reg;
+
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ CRESAMPLE_CTL_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(cresample->base, CRESAMPLE_CONTROL);
+ xilinx_drm_writel(cresample->base, CRESAMPLE_CONTROL,
+ reg | CRESAMPLE_CTL_RU);
+}
+
+/* get an input format */
+const char *
+xilinx_cresample_get_input_format_name(struct xilinx_cresample *cresample)
+{
+ return cresample->input_format_name;
+}
+
+/* get an output format */
+const char *
+xilinx_cresample_get_output_format_name(struct xilinx_cresample *cresample)
+{
+ return cresample->output_format_name;
+}
+
+static const struct of_device_id xilinx_cresample_of_match[] = {
+ { .compatible = "xlnx,v-cresample-3.01.a" },
+ { /* end of table */ },
+};
+
+struct xilinx_cresample *xilinx_cresample_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_cresample *cresample;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_cresample_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ cresample = devm_kzalloc(dev, sizeof(*cresample), GFP_KERNEL);
+ if (!cresample)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ cresample->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(cresample->base))
+ return ERR_CAST(cresample->base);
+
+ ret = of_property_read_string(node, "xlnx,input-format",
+ &cresample->input_format_name);
+ if (ret) {
+ dev_warn(dev, "failed to get an input format prop\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_string(node, "xlnx,output-format",
+ &cresample->output_format_name);
+ if (ret) {
+ dev_warn(dev, "failed to get an output format prop\n");
+ return ERR_PTR(ret);
+ }
+
+ xilinx_cresample_reset(cresample);
+
+ return cresample;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_cresample.h b/drivers/gpu/drm/xilinx/xilinx_cresample.h
new file mode 100644
index 000000000000..34323c722881
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_cresample.h
@@ -0,0 +1,40 @@
+/*
+ * Xilinx Chroma Resampler Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_CRESAMPLE_H_
+#define _XILINX_CRESAMPLE_H_
+
+struct xilinx_cresample;
+
+void xilinx_cresample_configure(struct xilinx_cresample *cresample,
+ int hactive, int vactive);
+void xilinx_cresample_reset(struct xilinx_cresample *cresample);
+void xilinx_cresample_enable(struct xilinx_cresample *cresample);
+void xilinx_cresample_disable(struct xilinx_cresample *cresample);
+
+const char *
+xilinx_cresample_get_input_format_name(struct xilinx_cresample *cresample);
+const char *
+xilinx_cresample_get_output_format_name(struct xilinx_cresample *cresample);
+
+struct device;
+struct device_node;
+
+struct xilinx_cresample *xilinx_cresample_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_CRESAMPLE_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_connector.c b/drivers/gpu/drm/xilinx/xilinx_drm_connector.c
new file mode 100644
index 000000000000..b37bb50108da
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_connector.c
@@ -0,0 +1,204 @@
+/*
+ * Xilinx DRM connector driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/device.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_connector.h"
+
+struct xilinx_drm_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+};
+
+struct xilinx_drm_connector_type {
+ const char *name;
+ const int type;
+};
+
+#define to_xilinx_connector(x) \
+ container_of(x, struct xilinx_drm_connector, base)
+
+/* get mode list */
+static int xilinx_drm_connector_get_modes(struct drm_connector *base_connector)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+ int count = 0;
+
+ if (encoder_sfuncs->get_modes)
+ count = encoder_sfuncs->get_modes(encoder, base_connector);
+
+ return count;
+}
+
+/* check if mode is valid */
+static int xilinx_drm_connector_mode_valid(struct drm_connector *base_connector,
+ struct drm_display_mode *mode)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+ int ret = MODE_OK;
+
+ if (encoder_sfuncs->mode_valid)
+ ret = encoder_sfuncs->mode_valid(encoder, mode);
+
+ return ret;
+}
+
+/* find best encoder: return stored encoder */
+static struct drm_encoder *
+xilinx_drm_connector_best_encoder(struct drm_connector *base_connector)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+
+ return connector->encoder;
+}
+
+static struct drm_connector_helper_funcs xilinx_drm_connector_helper_funcs = {
+ .get_modes = xilinx_drm_connector_get_modes,
+ .mode_valid = xilinx_drm_connector_mode_valid,
+ .best_encoder = xilinx_drm_connector_best_encoder,
+};
+
+static enum drm_connector_status
+xilinx_drm_connector_detect(struct drm_connector *base_connector, bool force)
+{
+ struct xilinx_drm_connector *connector =
+ to_xilinx_connector(base_connector);
+ enum drm_connector_status status = connector_status_unknown;
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_encoder_slave *encoder_slave = to_encoder_slave(encoder);
+ const struct drm_encoder_slave_funcs *encoder_sfuncs =
+ encoder_slave->slave_funcs;
+
+ if (encoder_sfuncs->detect) {
+ status = encoder_sfuncs->detect(encoder, base_connector);
+
+ /* some connectors ignore the first HPD, so try again if forced */
+ if (force && status != connector_status_connected)
+ status = encoder_sfuncs->detect(encoder, base_connector);
+ }
+
+ DRM_DEBUG_KMS("status: %d\n", status);
+
+ return status;
+}
+
+/* destroy connector */
+void xilinx_drm_connector_destroy(struct drm_connector *base_connector)
+{
+ drm_connector_unregister(base_connector);
+ drm_connector_cleanup(base_connector);
+}
+
+static const struct drm_connector_funcs xilinx_drm_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = xilinx_drm_connector_detect,
+ .destroy = xilinx_drm_connector_destroy,
+};
+
+static const struct xilinx_drm_connector_type connector_types[] = {
+ { "HDMIA", DRM_MODE_CONNECTOR_HDMIA },
+ { "DisplayPort", DRM_MODE_CONNECTOR_DisplayPort },
+};
+
+/* create connector */
+struct drm_connector *
+xilinx_drm_connector_create(struct drm_device *drm,
+ struct drm_encoder *base_encoder, int id)
+{
+ struct xilinx_drm_connector *connector;
+ const char *string;
+ int type = DRM_MODE_CONNECTOR_Unknown;
+ int i, ret;
+
+ connector = devm_kzalloc(drm->dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector->base.polled = DRM_CONNECTOR_POLL_HPD |
+ DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ ret = of_property_read_string_index(drm->dev->of_node,
+ "xlnx,connector-type", id, &string);
+ if (ret < 0) {
+ dev_err(drm->dev, "No connector type in DT\n");
+ return ERR_PTR(ret);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(connector_types); i++)
+ if (strcmp(connector_types[i].name, string) == 0) {
+ type = connector_types[i].type;
+ break;
+ }
+
+ if (type == DRM_MODE_CONNECTOR_Unknown) {
+ dev_err(drm->dev, "Unknown connector type in DT\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = drm_connector_init(drm, &connector->base,
+ &xilinx_drm_connector_funcs, type);
+ if (ret) {
+ DRM_ERROR("failed to initialize connector\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_connector_helper_add(&connector->base,
+ &xilinx_drm_connector_helper_funcs);
+
+ /* add entry for connector */
+ ret = drm_connector_register(&connector->base);
+ if (ret) {
+ DRM_ERROR("failed to register a connector\n");
+ goto err_register;
+ }
+
+ /* connect connector and encoder */
+ ret = drm_connector_attach_encoder(&connector->base, base_encoder);
+ if (ret) {
+ DRM_ERROR("failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+ connector->encoder = base_encoder;
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+
+ return &connector->base;
+
+err_attach:
+ drm_connector_unregister(&connector->base);
+err_register:
+ drm_connector_cleanup(&connector->base);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_connector.h b/drivers/gpu/drm/xilinx/xilinx_drm_connector.h
new file mode 100644
index 000000000000..750bfd8d1e86
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_connector.h
@@ -0,0 +1,29 @@
+/*
+ * Xilinx DRM connector header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_CONNECTOR_H_
+#define _XILINX_DRM_CONNECTOR_H_
+
+struct drm_device;
+struct drm_connector;
+
+struct drm_connector *
+xilinx_drm_connector_create(struct drm_device *drm,
+ struct drm_encoder *base_encoder, int id);
+void xilinx_drm_connector_destroy(struct drm_connector *base_connector);
+
+#endif /* _XILINX_DRM_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c
new file mode 100644
index 000000000000..66513b13b045
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.c
@@ -0,0 +1,595 @@
+/*
+ * Xilinx DRM crtc driver for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+
+#include <video/videomode.h>
+
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_plane.h"
+
+#include "xilinx_cresample.h"
+#include "xilinx_rgb2yuv.h"
+#include "xilinx_vtc.h"
+#include "xilinx_drm_sdi.h"
+
+struct xilinx_drm_crtc {
+ struct drm_crtc base;
+ struct xilinx_cresample *cresample;
+ struct xilinx_rgb2yuv *rgb2yuv;
+ struct clk *pixel_clock;
+ bool pixel_clock_enabled;
+ struct xilinx_vtc *vtc;
+ struct xilinx_drm_plane_manager *plane_manager;
+ int dpms;
+ unsigned int alpha;
+ struct drm_pending_vblank_event *event;
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct xilinx_sdi *sdi;
+};
+
+#define to_xilinx_crtc(x) container_of(x, struct xilinx_drm_crtc, base)
+
+/* set crtc dpms */
+static void xilinx_drm_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ int ret;
+
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", crtc->dpms, dpms);
+
+ if (crtc->dpms == dpms)
+ return;
+
+ crtc->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (!crtc->pixel_clock_enabled) {
+ ret = clk_prepare_enable(crtc->pixel_clock);
+ if (ret)
+ DRM_ERROR("failed to enable a pixel clock\n");
+ else
+ crtc->pixel_clock_enabled = true;
+ }
+
+ xilinx_drm_plane_manager_dpms(crtc->plane_manager, dpms);
+ xilinx_drm_plane_dpms(base_crtc->primary, dpms);
+ if (crtc->rgb2yuv)
+ xilinx_rgb2yuv_enable(crtc->rgb2yuv);
+ if (crtc->cresample)
+ xilinx_cresample_enable(crtc->cresample);
+ if (crtc->vtc)
+ xilinx_vtc_enable(crtc->vtc);
+ break;
+ default:
+ if (crtc->vtc) {
+ xilinx_vtc_disable(crtc->vtc);
+ xilinx_vtc_reset(crtc->vtc);
+ }
+ if (crtc->cresample) {
+ xilinx_cresample_disable(crtc->cresample);
+ xilinx_cresample_reset(crtc->cresample);
+ }
+ if (crtc->rgb2yuv) {
+ xilinx_rgb2yuv_disable(crtc->rgb2yuv);
+ xilinx_rgb2yuv_reset(crtc->rgb2yuv);
+ }
+ xilinx_drm_plane_dpms(base_crtc->primary, dpms);
+ xilinx_drm_plane_manager_dpms(crtc->plane_manager, dpms);
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+/* prepare crtc */
+static void xilinx_drm_crtc_prepare(struct drm_crtc *base_crtc)
+{
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+}
+
+/* apply mode to crtc pipe */
+static void xilinx_drm_crtc_commit(struct drm_crtc *base_crtc)
+{
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_ON);
+ xilinx_drm_plane_commit(base_crtc->primary);
+}
+
+/* fix mode */
+static bool xilinx_drm_crtc_mode_fixup(struct drm_crtc *base_crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* no op */
+ return true;
+}
+
+/* set new mode in crtc pipe */
+static int xilinx_drm_crtc_mode_set(struct drm_crtc *base_crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct videomode vm;
+ long diff;
+ int ret;
+
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+
+ /* set pixel clock */
+ ret = clk_set_rate(crtc->pixel_clock, adjusted_mode->clock * 1000);
+ if (ret) {
+ DRM_ERROR("failed to set a pixel clock\n");
+ return ret;
+ }
+
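+ /* Log if the achieved pixel clock deviates more than 5% from the request */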
+ diff = clk_get_rate(crtc->pixel_clock) - adjusted_mode->clock * 1000;
+ if (abs(diff) > (adjusted_mode->clock * 1000) / 20)
+ DRM_DEBUG_KMS("actual pixel clock rate(%d) is off by %ld\n",
+ adjusted_mode->clock, diff);
+
+ if (crtc->vtc) {
+ /* set video timing */
+ vm.hactive = adjusted_mode->hdisplay;
+ vm.hfront_porch = adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay;
+ vm.hback_porch = adjusted_mode->htotal -
+ adjusted_mode->hsync_end;
+ vm.hsync_len = adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ xilinx_vtc_config_sig(crtc->vtc, &vm);
+ }
+
+ /* configure cresample and rgb2yuv */
+ if (crtc->cresample)
+ xilinx_cresample_configure(crtc->cresample,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ if (crtc->rgb2yuv)
+ xilinx_rgb2yuv_configure(crtc->rgb2yuv,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+
+ /* configure a plane: vdma and osd layer */
+ xilinx_drm_plane_manager_mode_set(crtc->plane_manager,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ ret = xilinx_drm_plane_mode_set(base_crtc->primary,
+ base_crtc->primary->fb, 0, 0,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay,
+ x, y,
+ adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int _xilinx_drm_crtc_mode_set_base(struct drm_crtc *base_crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ int ret;
+
+ /* configure a plane */
+ xilinx_drm_plane_manager_mode_set(crtc->plane_manager,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay);
+ ret = xilinx_drm_plane_mode_set(base_crtc->primary,
+ fb, 0, 0,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay,
+ x, y,
+ base_crtc->hwmode.hdisplay,
+ base_crtc->hwmode.vdisplay);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ /* apply the new fb addr */
+ xilinx_drm_crtc_commit(base_crtc);
+
+ return 0;
+}
+
+/* update address and information from fb */
+static int xilinx_drm_crtc_mode_set_base(struct drm_crtc *base_crtc,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ /* configure a plane */
+ return _xilinx_drm_crtc_mode_set_base(base_crtc, base_crtc->primary->fb,
+ x, y);
+}
+
+static struct drm_crtc_helper_funcs xilinx_drm_crtc_helper_funcs = {
+ .dpms = xilinx_drm_crtc_dpms,
+ .prepare = xilinx_drm_crtc_prepare,
+ .commit = xilinx_drm_crtc_commit,
+ .mode_fixup = xilinx_drm_crtc_mode_fixup,
+ .mode_set = xilinx_drm_crtc_mode_set,
+ .mode_set_base = xilinx_drm_crtc_mode_set_base,
+};
+
+/* destroy crtc */
+void xilinx_drm_crtc_destroy(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ /* make sure crtc is off */
+ xilinx_drm_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+ drm_crtc_cleanup(base_crtc);
+
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_put(crtc->dp_sub);
+
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+
+ xilinx_drm_plane_remove_manager(crtc->plane_manager);
+}
+
+/* cancel page flip functions */
+void xilinx_drm_crtc_cancel_page_flip(struct drm_crtc *base_crtc,
+ struct drm_file *file)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = crtc->event;
+ if (event && (event->base.file_priv == file)) {
+ crtc->event = NULL;
+ kfree(event);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/* finish page flip functions */
+static void xilinx_drm_crtc_finish_page_flip(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = crtc->event;
+ crtc->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(base_crtc, event);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+/* page flip functions */
+static int xilinx_drm_crtc_page_flip(struct drm_crtc *base_crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+ struct drm_device *drm = base_crtc->dev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ if (crtc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+
+ /* configure a plane */
+ ret = _xilinx_drm_crtc_mode_set_base(base_crtc, fb,
+ base_crtc->x, base_crtc->y);
+ if (ret) {
+ DRM_ERROR("failed to mode set a plane\n");
+ return ret;
+ }
+
+ base_crtc->primary->fb = fb;
+
+ if (event) {
+ event->pipe = 0;
+ drm_crtc_vblank_get(base_crtc);
+ spin_lock_irqsave(&drm->event_lock, flags);
+ crtc->event = event;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ return 0;
+}
+
+/* vblank interrupt handler */
+static void xilinx_drm_crtc_vblank_handler(void *data)
+{
+ struct drm_crtc *base_crtc = data;
+ struct drm_device *drm;
+
+ if (!base_crtc)
+ return;
+
+ drm = base_crtc->dev;
+
+ drm_handle_vblank(drm, 0);
+ xilinx_drm_crtc_finish_page_flip(base_crtc);
+}
+
+/* enable vblank interrupt */
+void xilinx_drm_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ if (crtc->vtc)
+ xilinx_vtc_enable_vblank_intr(crtc->vtc,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_enable_vblank(crtc->dp_sub,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+#ifdef CONFIG_DRM_XILINX_SDI
+ if (crtc->sdi)
+ xilinx_drm_sdi_enable_vblank(crtc->sdi,
+ xilinx_drm_crtc_vblank_handler,
+ base_crtc);
+#endif
+}
+
+/* disable vblank interrupt */
+void xilinx_drm_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ if (crtc->dp_sub)
+ xilinx_drm_dp_sub_disable_vblank(crtc->dp_sub);
+ if (crtc->vtc)
+ xilinx_vtc_disable_vblank_intr(crtc->vtc);
+#ifdef CONFIG_DRM_XILINX_SDI
+ if (crtc->sdi)
+ xilinx_drm_sdi_disable_vblank(crtc->sdi);
+#endif
+}
+
+/**
+ * xilinx_drm_crtc_restore - Restore the crtc states
+ * @base_crtc: base crtc object
+ *
+ * Restore the crtc states to the default ones. The request is propagated
+ * to the plane driver.
+ */
+void xilinx_drm_crtc_restore(struct drm_crtc *base_crtc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ xilinx_drm_plane_restore(crtc->plane_manager);
+}
+
+/* check max width */
+unsigned int xilinx_drm_crtc_get_max_width(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_max_width(base_crtc->primary);
+}
+
+/* check format */
+bool xilinx_drm_crtc_check_format(struct drm_crtc *base_crtc, uint32_t fourcc)
+{
+ struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
+
+ return xilinx_drm_plane_check_format(crtc->plane_manager, fourcc);
+}
+
+/* get format */
+uint32_t xilinx_drm_crtc_get_format(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_format(base_crtc->primary);
+}
+
+/**
+ * xilinx_drm_crtc_get_align - Get the alignment value for pitch
+ * @base_crtc: Base crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_crtc_get_align(struct drm_crtc *base_crtc)
+{
+ return xilinx_drm_plane_get_align(base_crtc->primary);
+}
+
+static struct drm_crtc_funcs xilinx_drm_crtc_funcs = {
+ .destroy = xilinx_drm_crtc_destroy,
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = xilinx_drm_crtc_page_flip,
+};
+
+/* create crtc */
+struct drm_crtc *xilinx_drm_crtc_create(struct drm_device *drm)
+{
+ struct xilinx_drm_crtc *crtc;
+ struct drm_plane *primary_plane;
+ struct device_node *sub_node;
+ int possible_crtcs = 1;
+ int ret;
+
+ crtc = devm_kzalloc(drm->dev, sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ /* probe chroma resampler and enable */
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,cresample", 0);
+ if (sub_node) {
+ crtc->cresample = xilinx_cresample_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->cresample)) {
+ DRM_ERROR("failed to probe a cresample\n");
+ return ERR_CAST(crtc->cresample);
+ }
+ }
+
+ /* probe color space converter and enable */
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,rgb2yuv", 0);
+ if (sub_node) {
+ crtc->rgb2yuv = xilinx_rgb2yuv_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->rgb2yuv)) {
+ DRM_ERROR("failed to probe a rgb2yuv\n");
+ return ERR_CAST(crtc->rgb2yuv);
+ }
+ }
+
+ /* probe a plane manager */
+ crtc->plane_manager = xilinx_drm_plane_probe_manager(drm);
+ if (IS_ERR(crtc->plane_manager)) {
+ if (PTR_ERR(crtc->plane_manager) != -EPROBE_DEFER)
+ DRM_ERROR("failed to probe a plane manager\n");
+ return ERR_CAST(crtc->plane_manager);
+ }
+
+ /* create a primary plane. there's only one crtc now */
+ primary_plane = xilinx_drm_plane_create_primary(crtc->plane_manager,
+ possible_crtcs);
+ if (IS_ERR(primary_plane)) {
+ DRM_ERROR("failed to create a primary plane for crtc\n");
+ ret = PTR_ERR(primary_plane);
+ goto err_plane;
+ }
+
+ /* create extra planes */
+ xilinx_drm_plane_create_planes(crtc->plane_manager, possible_crtcs);
+
+ crtc->pixel_clock = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(crtc->pixel_clock)) {
+ if (PTR_ERR(crtc->pixel_clock) == -EPROBE_DEFER) {
+ ret = PTR_ERR(crtc->pixel_clock);
+ goto err_plane;
+ } else {
+ DRM_DEBUG_KMS("failed to get pixel clock\n");
+ crtc->pixel_clock = NULL;
+ }
+ }
+
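+ /* Verify the pixel clock can be enabled; it is re-enabled when the CRTC turns on */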
+ ret = clk_prepare_enable(crtc->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ crtc->pixel_clock_enabled = false;
+ goto err_plane;
+ }
+ clk_disable_unprepare(crtc->pixel_clock);
+
+ sub_node = of_parse_phandle(drm->dev->of_node, "xlnx,vtc", 0);
+ if (sub_node) {
+ crtc->vtc = xilinx_vtc_probe(drm->dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(crtc->vtc)) {
+ DRM_ERROR("failed to probe video timing controller\n");
+ ret = PTR_ERR(crtc->vtc);
+ goto err_pixel_clk;
+ }
+ }
+
+ crtc->dp_sub = xilinx_drm_dp_sub_of_get(drm->dev->of_node);
+ if (IS_ERR(crtc->dp_sub)) {
+ ret = PTR_ERR(crtc->dp_sub);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get a dp_sub\n");
+ goto err_pixel_clk;
+ }
+
+#ifdef CONFIG_DRM_XILINX_SDI
+ crtc->sdi = xilinx_drm_sdi_of_get(drm->dev->of_node);
+ if (IS_ERR(crtc->sdi)) {
+ ret = PTR_ERR(crtc->sdi);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get a sdi\n");
+ goto err_pixel_clk;
+ }
+#endif
+ crtc->dpms = DRM_MODE_DPMS_OFF;
+
+ /* initialize drm crtc */
+ ret = drm_crtc_init_with_planes(drm, &crtc->base, primary_plane,
+ NULL, &xilinx_drm_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize crtc\n");
+ goto err_pixel_clk;
+ }
+ drm_crtc_helper_add(&crtc->base, &xilinx_drm_crtc_helper_funcs);
+
+ return &crtc->base;
+
+err_pixel_clk:
+ if (crtc->pixel_clock_enabled) {
+ clk_disable_unprepare(crtc->pixel_clock);
+ crtc->pixel_clock_enabled = false;
+ }
+err_plane:
+ xilinx_drm_plane_remove_manager(crtc->plane_manager);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h
new file mode 100644
index 000000000000..3566e0eba036
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_crtc.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx DRM crtc header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_CRTC_H_
+#define _XILINX_DRM_CRTC_H_
+
+struct drm_device;
+struct drm_crtc;
+
+void xilinx_drm_crtc_enable_vblank(struct drm_crtc *base_crtc);
+void xilinx_drm_crtc_disable_vblank(struct drm_crtc *base_crtc);
+void xilinx_drm_crtc_cancel_page_flip(struct drm_crtc *base_crtc,
+ struct drm_file *file);
+
+void xilinx_drm_crtc_restore(struct drm_crtc *base_crtc);
+
+unsigned int xilinx_drm_crtc_get_max_width(struct drm_crtc *base_crtc);
+bool xilinx_drm_crtc_check_format(struct drm_crtc *base_crtc, uint32_t fourcc);
+uint32_t xilinx_drm_crtc_get_format(struct drm_crtc *base_crtc);
+unsigned int xilinx_drm_crtc_get_align(struct drm_crtc *base_crtc);
+
+struct drm_crtc *xilinx_drm_crtc_create(struct drm_device *drm);
+void xilinx_drm_crtc_destroy(struct drm_crtc *base_crtc);
+
+#endif /* _XILINX_DRM_CRTC_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp.c b/drivers/gpu/drm/xilinx/xilinx_drm_dp.c
new file mode 100644
index 000000000000..fdb5e74cc96c
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp.c
@@ -0,0 +1,2186 @@
+/*
+ * Xilinx DRM DisplayPort encoder driver for Xilinx
+ *
+ * Copyright (C) 2014 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+
+static uint xilinx_drm_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, xilinx_drm_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms,
+ "DP aux timeout value in msec (default: 50)");
+
+static uint xilinx_drm_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, xilinx_drm_dp_power_on_delay_ms, uint,
+ 0644);
+MODULE_PARM_DESC(power_on_delay_ms,
+		 "Delay after power on request in msec (default: 4)");
+
+/* Link configuration registers */
+#define XILINX_DP_TX_LINK_BW_SET 0x0
+#define XILINX_DP_TX_LANE_CNT_SET 0x4
+#define XILINX_DP_TX_ENHANCED_FRAME_EN 0x8
+#define XILINX_DP_TX_TRAINING_PATTERN_SET 0xc
+#define XILINX_DP_TX_SCRAMBLING_DISABLE 0x14
+#define XILINX_DP_TX_DOWNSPREAD_CTL 0x18
+#define XILINX_DP_TX_SW_RESET 0x1c
+#define XILINX_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define XILINX_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define XILINX_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define XILINX_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define XILINX_DP_TX_SW_RESET_AUX BIT(7)
+#define XILINX_DP_TX_SW_RESET_ALL (XILINX_DP_TX_SW_RESET_STREAM1 | \
+ XILINX_DP_TX_SW_RESET_STREAM2 | \
+ XILINX_DP_TX_SW_RESET_STREAM3 | \
+ XILINX_DP_TX_SW_RESET_STREAM4 | \
+ XILINX_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define XILINX_DP_TX_ENABLE 0x80
+#define XILINX_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define XILINX_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define XILINX_DP_TX_VERSION 0xf8
+#define XILINX_DP_TX_VERSION_MAJOR_MASK (0xff << 24)
+#define XILINX_DP_TX_VERSION_MAJOR_SHIFT 24
+#define XILINX_DP_TX_VERSION_MINOR_MASK (0xff << 16)
+#define XILINX_DP_TX_VERSION_MINOR_SHIFT 16
+#define XILINX_DP_TX_VERSION_REVISION_MASK (0xf << 12)
+#define XILINX_DP_TX_VERSION_REVISION_SHIFT 12
+#define XILINX_DP_TX_VERSION_PATCH_MASK (0xf << 8)
+#define XILINX_DP_TX_VERSION_PATCH_SHIFT 8
+#define XILINX_DP_TX_VERSION_INTERNAL_MASK (0xff << 0)
+#define XILINX_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define XILINX_DP_TX_CORE_ID 0xfc
+#define XILINX_DP_TX_CORE_ID_MAJOR_MASK (0xff << 24)
+#define XILINX_DP_TX_CORE_ID_MAJOR_SHIFT 24
+#define XILINX_DP_TX_CORE_ID_MINOR_MASK (0xff << 16)
+#define XILINX_DP_TX_CORE_ID_MINOR_SHIFT 16
+#define XILINX_DP_TX_CORE_ID_REVISION_MASK (0xff << 8)
+#define XILINX_DP_TX_CORE_ID_REVISION_SHIFT 8
+#define XILINX_DP_TX_CORE_ID_DIRECTION BIT(0)
+
+/* AUX channel interface registers */
+#define XILINX_DP_TX_AUX_COMMAND 0x100
+#define XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define XILINX_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define XILINX_DP_TX_AUX_WRITE_FIFO 0x104
+#define XILINX_DP_TX_AUX_ADDRESS 0x108
+#define XILINX_DP_TX_CLK_DIVIDER 0x10c
+#define XILINX_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define XILINX_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define XILINX_DP_TX_INTR_SIGNAL_STATE 0x130
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define XILINX_DP_TX_AUX_REPLY_DATA 0x134
+#define XILINX_DP_TX_AUX_REPLY_CODE 0x138
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define XILINX_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define XILINX_DP_TX_AUX_REPLY_CNT 0x13c
+#define XILINX_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define XILINX_DP_TX_INTR_STATUS 0x140
+#define XILINX_DP_TX_INTR_MASK 0x144
+#define XILINX_DP_TX_INTR_HPD_IRQ BIT(0)
+#define XILINX_DP_TX_INTR_HPD_EVENT BIT(1)
+#define XILINX_DP_TX_INTR_REPLY_RECV BIT(2)
+#define XILINX_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define XILINX_DP_TX_INTR_HPD_PULSE BIT(4)
+#define XILINX_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define XILINX_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define XILINX_DP_TX_INTR_VBLANK_START BIT(13)
+#define XILINX_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define XILINX_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define XILINX_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define XILINX_DP_TX_INTR_CUST_TS BIT(29)
+#define XILINX_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define XILINX_DP_TX_INTR_VSYNC_TS BIT(31)
+#define XILINX_DP_TX_INTR_ALL (XILINX_DP_TX_INTR_HPD_IRQ | \
+ XILINX_DP_TX_INTR_HPD_EVENT | \
+ XILINX_DP_TX_INTR_REPLY_RECV | \
+ XILINX_DP_TX_INTR_REPLY_TIMEOUT | \
+ XILINX_DP_TX_INTR_HPD_PULSE | \
+ XILINX_DP_TX_INTR_EXT_PKT_TXD | \
+ XILINX_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ XILINX_DP_TX_INTR_VBLANK_START | \
+ XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define XILINX_DP_TX_REPLY_DATA_CNT 0x148
+#define XILINX_DP_SUB_TX_INTR_STATUS 0x3a0
+#define XILINX_DP_SUB_TX_INTR_MASK 0x3a4
+#define XILINX_DP_SUB_TX_INTR_EN 0x3a8
+#define XILINX_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define XILINX_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define XILINX_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define XILINX_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define XILINX_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define XILINX_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define XILINX_DP_TX_MAIN_STREAM_HRES 0x194
+#define XILINX_DP_TX_MAIN_STREAM_VRES 0x198
+#define XILINX_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define XILINX_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define XILINX_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define XILINX_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define XILINX_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define XILINX_DP_TX_M_VID 0x1ac
+#define XILINX_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define XILINX_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define XILINX_DP_TX_N_VID 0x1b4
+#define XILINX_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define XILINX_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define XILINX_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define XILINX_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define XILINX_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define XILINX_DP_TX_PHY_CONFIG 0x200
+#define XILINX_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define XILINX_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define XILINX_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define XILINX_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define XILINX_DP_TX_PHY_CONFIG_ALL_RESET (XILINX_DP_TX_PHY_CONFIG_PHY_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ XILINX_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define XILINX_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
+#define XILINX_DP_TX_PHY_POWER_DOWN 0x238
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define XILINX_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define XILINX_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define XILINX_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define XILINX_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define XILINX_DP_TX_PHY_STATUS 0x280
+#define XILINX_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define XILINX_DP_TX_AUDIO_CONTROL 0x300
+#define XILINX_DP_TX_AUDIO_CHANNELS 0x304
+#define XILINX_DP_TX_AUDIO_INFO_DATA 0x308
+#define XILINX_DP_TX_AUDIO_M_AUD 0x328
+#define XILINX_DP_TX_AUDIO_N_AUD 0x32c
+#define XILINX_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define XILINX_DP_MISC0_RGB (0)
+#define XILINX_DP_MISC0_YCRCB_422 (5 << 1)
+#define XILINX_DP_MISC0_YCRCB_444 (6 << 1)
+#define XILINX_DP_MISC0_FORMAT_MASK 0xe
+#define XILINX_DP_MISC0_BPC_6 (0 << 5)
+#define XILINX_DP_MISC0_BPC_8 (1 << 5)
+#define XILINX_DP_MISC0_BPC_10 (2 << 5)
+#define XILINX_DP_MISC0_BPC_12 (3 << 5)
+#define XILINX_DP_MISC0_BPC_16 (4 << 5)
+#define XILINX_DP_MISC0_BPC_MASK 0xe0
+#define XILINX_DP_MISC1_Y_ONLY (1 << 7)
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_MAX_LANES 4
+
+enum dp_version {
+ DP_V1_1A = 0x11,
+ DP_V1_2 = 0x12
+};
+
+/**
+ * struct xilinx_drm_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct xilinx_drm_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct xilinx_drm_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth (link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ */
+struct xilinx_drm_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+};
+
+/**
+ * struct xilinx_drm_dp_config - Configuration of DisplayPort from DTS
+ * @dp_version: DisplayPort protocol version
+ * @max_lanes: max number of lanes
+ * @max_link_rate: max link rate
+ * @max_bpc: maximum bits-per-color
+ * @max_pclock: maximum pixel clock rate
+ * @enable_yonly: enable yonly color space logic
+ * @enable_ycrcb: enable ycrcb color space logic
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ */
+struct xilinx_drm_dp_config {
+ enum dp_version dp_version;
+ u32 max_lanes;
+ u32 max_link_rate;
+ u32 max_bpc;
+ u32 max_pclock;
+ bool enable_yonly;
+ bool enable_ycrcb;
+
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+};
+
+/**
+ * struct xilinx_drm_dp - Xilinx DisplayPort core
+ * @encoder: pointer to the drm encoder structure
+ * @dev: device structure
+ * @iomem: device I/O memory for register access
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @dp_sub: DisplayPort subsystem
+ * @phy: PHY handles for DP lanes
+ * @aclk: clock source device for internal axi4-lite clock
+ * @aud_clk: clock source device for audio clock
+ * @aud_clk_enabled: if audio clock is enabled
+ * @dpms: current dpms state
+ * @status: the connection status
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct xilinx_drm_dp {
+ struct drm_encoder *encoder;
+ struct device *dev;
+ void __iomem *iomem;
+
+ struct xilinx_drm_dp_config config;
+ struct drm_dp_aux aux;
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct phy *phy[DP_MAX_LANES];
+ struct clk *aclk;
+ struct clk *aud_clk;
+ bool aud_clk_enabled;
+
+ int dpms;
+ enum drm_connector_status status;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct xilinx_drm_dp_link_config link_config;
+ struct xilinx_drm_dp_mode mode;
+ u8 train_set[DP_MAX_LANES];
+};
+
+static inline struct xilinx_drm_dp *to_dp(struct drm_encoder *encoder)
+{
+ return to_encoder_slave(encoder)->slave_priv;
+}
+
+#define AUX_READ_BIT 0x1
+
+#ifdef CONFIG_DRM_XILINX_DP_DEBUG_FS
+#define XILINX_DP_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DP_DEBUGFS_UINT8_MAX_STR "255"
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+static inline int xilinx_drm_dp_max_rate(int link_rate, u8 lane_num, u8 bpp);
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+enum xilinx_dp_testcases {
+ DP_TC_LINK_RATE,
+ DP_TC_LANE_COUNT,
+ DP_TC_OUTPUT_FMT,
+ DP_TC_NONE
+};
+
+struct xilinx_dp_debugfs {
+ enum xilinx_dp_testcases testcase;
+ u8 link_rate;
+ u8 lane_cnt;
+ u8 old_output_fmt;
+ struct xilinx_drm_dp *dp;
+};
+
+static struct xilinx_dp_debugfs dp_debugfs;
+struct xilinx_dp_debugfs_request {
+ const char *req;
+ enum xilinx_dp_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static s64 xilinx_dp_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
+static int xilinx_dp_update_output_format(u8 output_fmt, u32 num_colors)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ struct xilinx_drm_dp_config *config = &dp->config;
+ u32 bpc;
+ u8 bpc_bits = (config->misc0 & XILINX_DP_MISC0_BPC_MASK);
+ bool misc1 = output_fmt & XILINX_DP_MISC1_Y_ONLY ? true : false;
+
+ switch (bpc_bits) {
+ case XILINX_DP_MISC0_BPC_6:
+ bpc = 6;
+ break;
+ case XILINX_DP_MISC0_BPC_8:
+ bpc = 8;
+ break;
+ case XILINX_DP_MISC0_BPC_10:
+ bpc = 10;
+ break;
+ case XILINX_DP_MISC0_BPC_12:
+ bpc = 12;
+ break;
+ case XILINX_DP_MISC0_BPC_16:
+ bpc = 16;
+ break;
+ default:
+ dev_err(dp->dev, "Invalid bpc count for misc0\n");
+ return -EINVAL;
+ }
+
+ /* clear old format */
+ config->misc0 &= ~XILINX_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~XILINX_DP_MISC1_Y_ONLY;
+
+ if (misc1) {
+ config->misc1 |= output_fmt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC1,
+ config->misc1);
+ } else {
+ config->misc0 |= output_fmt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC0,
+ config->misc0);
+ }
+ config->bpp = num_colors * bpc;
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_max_linkrate_write(char **dp_test_arg)
+{
+ char *link_rate_arg;
+ s64 link_rate;
+
+ link_rate_arg = strsep(dp_test_arg, " ");
+ link_rate = xilinx_dp_debugfs_argument_value(link_rate_arg);
+ if (link_rate < 0 || (link_rate != DP_HIGH_BIT_RATE2 &&
+ link_rate != DP_HIGH_BIT_RATE &&
+ link_rate != DP_REDUCED_BIT_RATE))
+ return -EINVAL;
+
+ dp_debugfs.link_rate = drm_dp_link_rate_to_bw_code(link_rate);
+ dp_debugfs.testcase = DP_TC_LINK_RATE;
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_max_lanecnt_write(char **dp_test_arg)
+{
+ char *lane_cnt_arg;
+ s64 lane_count;
+
+ lane_cnt_arg = strsep(dp_test_arg, " ");
+ lane_count = xilinx_dp_debugfs_argument_value(lane_cnt_arg);
+ if (lane_count < 0 || !IN_RANGE(lane_count, 1,
+ dp_debugfs.dp->config.max_lanes))
+ return -EINVAL;
+
+ dp_debugfs.lane_cnt = lane_count;
+ dp_debugfs.testcase = DP_TC_LANE_COUNT;
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_output_display_format_write(char **dp_test_arg)
+{
+ int ret;
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ char *output_format;
+ u8 output_fmt;
+ u32 num_colors;
+
+	/* Read the requested format from the user argument */
+ output_format = strsep(dp_test_arg, " ");
+
+ if (strncmp(output_format, "rgb", 3) == 0) {
+ output_fmt = XILINX_DP_MISC0_RGB;
+ num_colors = 3;
+ } else if (strncmp(output_format, "ycbcr422", 8) == 0) {
+ output_fmt = XILINX_DP_MISC0_YCRCB_422;
+ num_colors = 2;
+ } else if (strncmp(output_format, "ycbcr444", 8) == 0) {
+ output_fmt = XILINX_DP_MISC0_YCRCB_444;
+ num_colors = 3;
+ } else if (strncmp(output_format, "yonly", 5) == 0) {
+ output_fmt = XILINX_DP_MISC1_Y_ONLY;
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid output format\n");
+ return -EINVAL;
+ }
+
+ if (dp->config.misc1 & XILINX_DP_MISC1_Y_ONLY)
+ dp_debugfs.old_output_fmt = XILINX_DP_MISC1_Y_ONLY;
+ else
+ dp_debugfs.old_output_fmt = dp->config.misc0 &
+ XILINX_DP_MISC0_FORMAT_MASK;
+
+ ret = xilinx_dp_update_output_format(output_fmt, num_colors);
+ if (!ret)
+ dp_debugfs.testcase = DP_TC_OUTPUT_FMT;
+ return ret;
+}
+
+static ssize_t xilinx_dp_debugfs_max_linkrate_read(char **kern_buff)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ size_t output_str_len;
+ u8 dpcd_link_bw;
+ int ret;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.link_rate = 0;
+
+ /* Getting Sink Side Link Rate */
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_LINK_BW_SET, &dpcd_link_bw);
+	if (ret < 0) {
+		dev_err(dp->dev, "Failed to read link rate via AUX.\n");
+		/* *kern_buff is freed by the caller on error */
+		return ret;
+	}
+
+	output_str_len = strlen(XILINX_DP_DEBUGFS_UINT8_MAX_STR) + 1;
+	output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+			       output_str_len);
+	snprintf(*kern_buff, output_str_len, "%u", dpcd_link_bw);
+
+ return 0;
+}
+
+static ssize_t xilinx_dp_debugfs_max_lanecnt_read(char **kern_buff)
+{
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ size_t output_str_len;
+ u8 dpcd_lane_cnt;
+ int ret;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.lane_cnt = 0;
+
+ /* Getting Sink Side Lane Count */
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &dpcd_lane_cnt);
+	if (ret < 0) {
+		dev_err(dp->dev, "Failed to read lane count via AUX.\n");
+		/* *kern_buff is freed by the caller on error */
+		return ret;
+	}
+
+ dpcd_lane_cnt &= DP_LANE_COUNT_MASK;
+	output_str_len = strlen(XILINX_DP_DEBUGFS_UINT8_MAX_STR) + 1;
+	output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+			       output_str_len);
+	snprintf(*kern_buff, output_str_len, "%u", dpcd_lane_cnt);
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_debugfs_output_display_format_read(char **kern_buff)
+{
+ int ret;
+ struct xilinx_drm_dp *dp = dp_debugfs.dp;
+ u8 old_output_fmt = dp_debugfs.old_output_fmt;
+ size_t output_str_len;
+ u32 num_colors;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+
+ if (old_output_fmt == XILINX_DP_MISC0_RGB) {
+ num_colors = 3;
+ } else if (old_output_fmt == XILINX_DP_MISC0_YCRCB_422) {
+ num_colors = 2;
+ } else if (old_output_fmt == XILINX_DP_MISC0_YCRCB_444) {
+ num_colors = 3;
+ } else if (old_output_fmt == XILINX_DP_MISC1_Y_ONLY) {
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid output format in misc0\n");
+ return -EINVAL;
+ }
+
+ ret = xilinx_dp_update_output_format(old_output_fmt, num_colors);
+ if (ret)
+ return ret;
+
+	output_str_len = strlen("Success") + 1;
+	output_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+			       output_str_len);
+	snprintf(*kern_buff, output_str_len, "%s", "Success");
+
+ return 0;
+}
+
+/* Match xilinx_dp_testcases vs dp_debugfs_reqs[] entry */
+static struct xilinx_dp_debugfs_request dp_debugfs_reqs[] = {
+ {"LINK_RATE", DP_TC_LINK_RATE,
+ xilinx_dp_debugfs_max_linkrate_read,
+ xilinx_dp_debugfs_max_linkrate_write},
+ {"LANE_COUNT", DP_TC_LANE_COUNT,
+ xilinx_dp_debugfs_max_lanecnt_read,
+ xilinx_dp_debugfs_max_lanecnt_write},
+ {"OUTPUT_DISPLAY_FORMAT", DP_TC_OUTPUT_FMT,
+ xilinx_dp_debugfs_output_display_format_read,
+ xilinx_dp_debugfs_output_display_format_write},
+};
+
+static ssize_t xilinx_dp_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ int ret;
+
+	if (!size)
+		return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DP_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dp_debugfs.testcase = DP_TC_NONE;
+ return -ENOMEM;
+ }
+
+ if (dp_debugfs.testcase == DP_TC_NONE) {
+		out_str_len = strlen("No testcase executed") + 1;
+		out_str_len = min_t(size_t, XILINX_DP_DEBUGFS_READ_MAX_SIZE,
+				    out_str_len);
+		snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+ } else {
+ ret = dp_debugfs_reqs[dp_debugfs.testcase].read_handler(
+ &kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+	ret = copy_to_user(buf, kern_buff, size);
+
+	kfree(kern_buff);
+	if (ret)
+		return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t
+xilinx_dp_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *dp_test_req;
+ int ret;
+ int i;
+
+	if (*pos != 0 || !size)
+		return -EINVAL;
+
+ if (dp_debugfs.testcase != DP_TC_NONE)
+ return -EBUSY;
+
+	/* +1 so the buffer stays NUL-terminated for strsep() below */
+	kern_buff = kzalloc(size + 1, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+	/* Read the testcase name and argument from the user request */
+ dp_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dp_debugfs_reqs); i++) {
+ if (!strcasecmp(dp_test_req, dp_debugfs_reqs[i].req))
+ if (!dp_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ }
+
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
+
+static const struct file_operations fops_xilinx_dp_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dp_debugfs_read,
+ .write = xilinx_dp_debugfs_write,
+};
+
+static int xilinx_dp_debugfs_init(struct xilinx_drm_dp *dp)
+{
+ int err;
+ struct dentry *xilinx_dp_debugfs_dir, *xilinx_dp_debugfs_file;
+
+ dp_debugfs.testcase = DP_TC_NONE;
+ dp_debugfs.dp = dp;
+
+ xilinx_dp_debugfs_dir = debugfs_create_dir("dp", NULL);
+ if (!xilinx_dp_debugfs_dir) {
+ dev_err(dp->dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dp_debugfs_file =
+ debugfs_create_file("testcase", 0444, xilinx_dp_debugfs_dir,
+ NULL, &fops_xilinx_dp_dbgfs);
+ if (!xilinx_dp_debugfs_file) {
+ dev_err(dp->dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dp_debugfs_dir);
+ xilinx_dp_debugfs_dir = NULL;
+ return err;
+}
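
For reference, the debugfs file created above accepts a space-separated request matching an entry in dp_debugfs_reqs[]. A minimal userspace sketch (hypothetical; it assumes debugfs is mounted at /sys/kernel/debug, and the "dp/testcase" path follows from the debugfs_create_dir()/debugfs_create_file() calls above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* "dp" and "testcase" match the debugfs names created above */
            int fd = open("/sys/kernel/debug/dp/testcase", O_WRONLY);
            /* LINK_RATE accepts 162000, 270000 or 540000 (see the write handler) */
            const char req[] = "LINK_RATE 270000";

            if (fd < 0)
                    return 1;
            if (write(fd, req, strlen(req)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }

Reading the file back afterwards reports the sink-side value, as implemented by the matching read handler.
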
+
+static void xilinx_dp_debugfs_mode_config(struct xilinx_drm_dp *dp, u8 *lanes,
+ u8 *rate_code, int pclock)
+{
+ int debugfs_rate = 0;
+ u8 bpp = dp->config.bpp;
+
+ if (!dp_debugfs.link_rate && !dp_debugfs.lane_cnt)
+ return;
+
+ if (dp_debugfs.link_rate) {
+ debugfs_rate = min(dp_debugfs.link_rate, *rate_code);
+ debugfs_rate =
+ drm_dp_bw_code_to_link_rate(debugfs_rate);
+ debugfs_rate =
+ xilinx_drm_dp_max_rate(debugfs_rate, *lanes, bpp);
+ }
+
+ if (dp_debugfs.lane_cnt) {
+ u8 lane;
+
+ lane = min(dp_debugfs.lane_cnt, *lanes);
+ debugfs_rate =
+ xilinx_drm_dp_max_rate(debugfs_rate, lane, bpp);
+ }
+
+ if (pclock > debugfs_rate) {
+		dev_dbg(dp->dev, "debugfs couldn't configure link values\n");
+ return;
+ }
+
+ if (dp_debugfs.link_rate)
+ *rate_code = dp_debugfs.link_rate;
+ if (dp_debugfs.lane_cnt)
+ *lanes = dp_debugfs.lane_cnt;
+}
+#else
+static int xilinx_dp_debugfs_init(struct xilinx_drm_dp *dp)
+{
+ return 0;
+}
+
+static void xilinx_dp_debugfs_mode_config(struct xilinx_drm_dp *dp, u8 *lanes,
+ u8 *rate_code, int pclock)
+{
+}
+#endif /* CONFIG_DRM_XILINX_DP_DEBUG_FS */
+
+/**
+ * xilinx_drm_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux-related commands, native or i2c aux
+ * read/write, are submitted through this function, which is mapped to the
+ * transfer function of struct drm_dp_aux. The function involves multiple
+ * register reads/writes, so synchronization is needed; drm_dp_helper
+ * provides it via @hw_mutex. The calling thread sleeps if there's no
+ * immediate reply to the command submission. The reply code is returned
+ * in @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
+static int xilinx_drm_dp_aux_cmd_submit(struct xilinx_drm_dp *dp, u32 cmd,
+ u16 addr, u8 *buf, u8 bytes, u8 *reply)
+{
+ bool is_read = (cmd & AUX_READ_BIT) ? true : false;
+ void __iomem *iomem = dp->iomem;
+ u32 reg, i;
+
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST)
+ return -EBUSY;
+
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_ADDRESS, addr);
+
+ if (!is_read)
+ for (i = 0; i < bytes; i++)
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_WRITE_FIFO,
+ buf[i]);
+
+ reg = cmd << XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT;
+ if (!buf || !bytes)
+ reg |= XILINX_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
+ else
+ reg |= (bytes - 1) << XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT;
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUX_COMMAND, reg);
+
+	/* Wait up to 2 ms for the reply to be delivered */
+ for (i = 0; ; i++) {
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY)
+ break;
+
+ if (reg & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
+ i == 2)
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1100);
+ }
+
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_AUX_REPLY_CODE);
+ if (reply)
+ *reply = reg;
+
+ if (is_read &&
+ (reg == XILINX_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
+ reg == XILINX_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
+ reg = xilinx_drm_readl(iomem, XILINX_DP_TX_REPLY_DATA_CNT);
+ if ((reg & XILINX_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
+ return -EIO;
+
+ for (i = 0; i < bytes; i++)
+ buf[i] = xilinx_drm_readl(iomem,
+ XILINX_DP_TX_AUX_REPLY_DATA);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if the PHY is ready. If it is not, poll every 1 ms, up to 100 times;
+ * this amount of delay was suggested by the IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int xilinx_drm_dp_phy_ready(struct xilinx_drm_dp *dp)
+{
+ u32 i, reg, ready, lane;
+
+ lane = dp->config.max_lanes;
+ ready = (1 << lane) - 1;
+ if (!dp->dp_sub)
+ ready |= XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED;
+
+ /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ DRM_ERROR("PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (kilobytes/sec)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
+ */
+static inline int xilinx_drm_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
+ return link_rate * lane_num * 8 / bpp;
+}
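
A quick numeric check of this formula (a standalone sketch, not part of the patch): drm_dp_bw_code_to_link_rate() returns, e.g., 270000 for DP_LINK_BW_2_7, which is the effective per-lane payload in kB/s after 8b/10b coding, so the result is directly comparable to a mode's pixel clock in kHz.

    #include <stdio.h>

    /* Mirrors xilinx_drm_dp_max_rate(): link_rate in kB/s per lane */
    static int max_rate(int link_rate, int lanes, int bpp)
    {
            return link_rate * lanes * 8 / bpp;
    }

    int main(void)
    {
            /* 2 lanes at 2.7 Gb/s, 24 bpp -> 180000 kHz, so a 1080p60
             * mode (148500 kHz pixel clock) fits this link config.
             */
            printf("%d kHz\n", max_rate(270000, 2, 24));
            return 0;
    }
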
+
+/**
+ * xilinx_drm_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values, rate and lane count for requested pixel
+ * clock @pclock. The @pclock is stored in the mode to be used in other
+ * functions later. The returned rate is downshifted from the current rate
+ * @current_bw.
+ *
+ * Return: Current link rate code, or -EINVAL.
+ */
+static int xilinx_drm_dp_mode_configure(struct xilinx_drm_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ if (current_bw == DP_LINK_BW_1_62)
+ return -EINVAL;
+
+ xilinx_dp_debugfs_mode_config(dp, &max_lanes, &max_link_rate_code,
+ pclock);
+
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
+
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = xilinx_drm_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_dbg(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
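
To make the search order concrete, here is a worked example as a standalone sketch (hypothetical numbers): for a 297000 kHz mode at 24 bpp with up to 4 lanes at DP_LINK_BW_5_4, the two loops settle on 2 lanes at the 5.4 Gb/s rate.

    #include <stdio.h>

    int main(void)
    {
            int bws[] = { 162000, 270000, 540000 };  /* kB/s per lane */
            int pclock = 297000, bpp = 24, lane, i;

            for (i = 2; i >= 0; i--)        /* highest rate within the max */
                    if (bws[i] <= 540000)
                            break;
            for (lane = 1; lane <= 4; lane <<= 1)
                    if (pclock <= bws[i] * lane * 8 / bpp) {
                            /* prints: 540000 kB/s x 2 lanes */
                            printf("%d kB/s x %d lanes\n", bws[i], lane);
                            return 0;
                    }
            return 1;
    }
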
+
+/**
+ * xilinx_drm_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void xilinx_drm_dp_adjust_train(struct xilinx_drm_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 max_preemphasis;
+ u8 i;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ max_preemphasis = (dp->dp_sub) ? DP_TRAIN_PRE_EMPH_LEVEL_2 :
+ DP_TRAIN_PRE_EMPH_LEVEL_3;
+
+ if (preemphasis >= max_preemphasis)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * xilinx_drm_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from the sink. The mapped
+ * values are predefined, and the values (vs, pe, pc) come from the device
+ * manual.
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int xilinx_drm_dp_update_vs_emph(struct xilinx_drm_dp *dp)
+{
+ u8 *train_set = dp->train_set;
+ u8 i, v_level, p_level;
+ int ret;
+ static u8 vs[4][4] = { { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+ static u8 pe[4][4] = { { 0x2, 0x2, 0x2, 0x2 },
+ { 0x1, 0x1, 0x1, 0xff },
+ { 0x0, 0x0, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+ dp->mode.lane_cnt);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (dp->phy[i]) {
+ u32 reg = XILINX_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+ xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+ xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+ xilinx_drm_writel(dp->iomem, reg, 0x2);
+ } else {
+ u32 reg;
+
+			reg = XILINX_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 + i * 4;
+			xilinx_drm_writel(dp->iomem, reg, vs[p_level][v_level]);
+			reg = XILINX_DP_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+			xilinx_drm_writel(dp->iomem, reg, pe[p_level][v_level]);
+			reg = XILINX_DP_TX_PHY_POSTCURSOR_LANE_0 + i * 4;
+ xilinx_drm_writel(dp->iomem, reg, 0);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int xilinx_drm_dp_link_train_cr(struct xilinx_drm_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+	/* 256 iterations is the maximum for 4 lanes and 4 voltage-swing
+	 * values, so this loop should exit within 512 iterations.
+	 */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = xilinx_drm_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+
+ if (i == lane_cnt)
+ break;
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ xilinx_drm_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization train is done successfully, or
+ * corresponding error code.
+ */
+static int xilinx_drm_dp_link_train_ce(struct xilinx_drm_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ if (dp->config.dp_version == DP_V1_2 &&
+ dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = xilinx_drm_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ xilinx_drm_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_link_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int xilinx_drm_dp_train(struct xilinx_drm_dp *dp)
+{
+ u32 reg;
+ u8 bw_code = dp->mode.bw_code;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 aux_lane_cnt = lane_cnt;
+ bool enhanced;
+ int ret;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LANE_CNT_SET, lane_cnt);
+
+ enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+ if (enhanced) {
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENHANCED_FRAME_EN, 1);
+ aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+
+	if (dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_DOWNSPREAD_CTL, 1);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ } else {
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_DOWNSPREAD_CTL, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+ if (ret < 0) {
+ DRM_ERROR("failed to set lane count\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ DP_SET_ANSI_8B10B);
+ if (ret < 0) {
+ DRM_ERROR("failed to set ANSI 8B/10B encoding\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+ if (ret < 0) {
+ DRM_ERROR("failed to set DP bandwidth\n");
+ return ret;
+ }
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LINK_BW_SET, bw_code);
+
+ switch (bw_code) {
+ case DP_LINK_BW_1_62:
+ reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+ break;
+ case DP_LINK_BW_2_7:
+ reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+ break;
+ case DP_LINK_BW_5_4:
+ default:
+ reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+ break;
+ }
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+ reg);
+ ret = xilinx_drm_dp_phy_ready(dp);
+ if (ret < 0)
+ return ret;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 1);
+
+	memset(dp->train_set, 0, sizeof(dp->train_set));
+
+ ret = xilinx_drm_dp_link_train_cr(dp);
+ if (ret)
+ return ret;
+
+ ret = xilinx_drm_dp_link_train_ce(dp);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable training pattern\n");
+ return ret;
+ }
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 0);
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void xilinx_drm_dp_train_loop(struct xilinx_drm_dp *dp)
+{
+ struct xilinx_drm_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
+ do {
+ if (dp->status == connector_status_disconnected)
+ return;
+
+ ret = xilinx_drm_dp_train(dp);
+ if (!ret)
+ return;
+
+ ret = xilinx_drm_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ return;
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+ DRM_ERROR("failed to train the DP link\n");
+}
+
+/**
+ * xilinx_drm_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the axi clock, so
+ * this function gets the axi clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
+ *
+ * Return: 0 on success, error value otherwise
+ */
+static int xilinx_drm_dp_init_aux(struct xilinx_drm_dp *dp)
+{
+ int clock_rate;
+ u32 reg, w;
+
+ clock_rate = clk_get_rate(dp->aclk);
+ if (clock_rate < XILINX_DP_TX_CLK_DIVIDER_MHZ) {
+ DRM_ERROR("aclk should be higher than 1MHz\n");
+ return -EINVAL;
+ }
+
+ /* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
+ for (w = 8; w <= 48; w += 8) {
+		/* AUX pulse width should be between 0.4 and 0.6 usec */
+ if (w >= (4 * clock_rate / 10000000) &&
+ w <= (6 * clock_rate / 10000000))
+ break;
+ }
+
+ if (w > 48) {
+ DRM_ERROR("aclk frequency too high\n");
+ return -EINVAL;
+ }
+ reg = w << XILINX_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
+
+ reg |= clock_rate / XILINX_DP_TX_CLK_DIVIDER_MHZ;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_CLK_DIVIDER, reg);
+
+ if (dp->dp_sub)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_SUB_TX_INTR_EN,
+ XILINX_DP_TX_INTR_ALL);
+ else
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INTR_MASK,
+ (u32)~XILINX_DP_TX_INTR_ALL);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 1);
+
+ return 0;
+}
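
A worked example of the divider setup above, as a standalone sketch: with a 100 MHz aclk, the 0.4 to 0.6 usec pulse window translates to 40 to 60 cycles, and the first allowed filter width in that window is 40.

    #include <stdio.h>

    int main(void)
    {
            unsigned long rate = 100000000;     /* 100 MHz aclk */
            unsigned int w;

            for (w = 8; w <= 48; w += 8)
                    if (w >= 4 * rate / 10000000 && w <= 6 * rate / 10000000)
                            break;
            /* prints: filter width 40, divider 100 */
            printf("filter width %u, divider %lu\n", w, rate / 1000000);
            return 0;
    }
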
+
+/**
+ * xilinx_drm_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned from the callee functions.
+ */
+static int xilinx_drm_dp_init_phy(struct xilinx_drm_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+
+ if (dp->dp_sub)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_SUB_TX_INTR_DS,
+ XILINX_DP_TX_INTR_ALL);
+ else
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INTR_MASK,
+ XILINX_DP_TX_INTR_ALL);
+
+ xilinx_drm_clr(dp->iomem, XILINX_DP_TX_PHY_CONFIG,
+ XILINX_DP_TX_PHY_CONFIG_ALL_RESET);
+
+	/* Wait for the PLL of the primary (first) lane to lock */
+ if (dp->phy[0]) {
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_drm_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Exit the phy.
+ */
+static void xilinx_drm_dp_exit_phy(struct xilinx_drm_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ ret = phy_exit(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev,
+ "failed to exit phy (%d) %d\n", i, ret);
+ }
+ }
+}
+
+static void xilinx_drm_dp_dpms(struct drm_encoder *encoder, int dpms)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+ unsigned int i;
+ int ret;
+
+ if (dp->dpms == dpms)
+ return;
+
+ dp->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ pm_runtime_get_sync(dp->dev);
+
+ if (dp->aud_clk && !dp->aud_clk_enabled) {
+ ret = clk_prepare_enable(dp->aud_clk);
+ if (ret) {
+ dev_err(dp->dev, "failed to enable aud_clk\n");
+ } else {
+ xilinx_drm_writel(iomem,
+ XILINX_DP_TX_AUDIO_CONTROL,
+ 1);
+ dp->aud_clk_enabled = true;
+ }
+ }
+ xilinx_drm_writel(iomem, XILINX_DP_TX_PHY_POWER_DOWN, 0);
+
+ if (dp->status == connector_status_connected) {
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ usleep_range(300, 500);
+ }
+ /* Some monitors take time to wake up properly */
+ msleep(xilinx_drm_dp_power_on_delay_ms);
+ if (ret != 1)
+ dev_dbg(dp->dev, "DP aux failed\n");
+ else
+ xilinx_drm_dp_train_loop(dp);
+ }
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SW_RESET,
+ XILINX_DP_TX_SW_RESET_ALL);
+ xilinx_drm_writel(iomem, XILINX_DP_TX_ENABLE_MAIN_STREAM, 1);
+
+ return;
+ default:
+ xilinx_drm_writel(iomem, XILINX_DP_TX_ENABLE_MAIN_STREAM, 0);
+ if (dp->status == connector_status_connected) {
+ drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ }
+ xilinx_drm_writel(iomem, XILINX_DP_TX_PHY_POWER_DOWN,
+ XILINX_DP_TX_PHY_POWER_DOWN_ALL);
+ if (dp->aud_clk && dp->aud_clk_enabled) {
+ xilinx_drm_writel(iomem, XILINX_DP_TX_AUDIO_CONTROL, 0);
+ clk_disable_unprepare(dp->aud_clk);
+ dp->aud_clk_enabled = false;
+ }
+ pm_runtime_put_sync(dp->dev);
+
+ return;
+ }
+}
+
+static void xilinx_drm_dp_save(struct drm_encoder *encoder)
+{
+ /* no op */
+}
+
+static void xilinx_drm_dp_restore(struct drm_encoder *encoder)
+{
+ /* no op */
+}
+
+#define XILINX_DP_SUB_TX_MIN_H_BACKPORCH 20
+
+static bool xilinx_drm_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ int diff = mode->htotal - mode->hsync_end;
+
+	/*
+	 * ZynqMP DP requires the horizontal back porch to be at least
+	 * XILINX_DP_SUB_TX_MIN_H_BACKPORCH pixels. This limitation may
+	 * conflict with modes requested by the sink device.
+	 */
+ if (dp->dp_sub && diff < XILINX_DP_SUB_TX_MIN_H_BACKPORCH) {
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
+ diff = XILINX_DP_SUB_TX_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return true;
+}
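
A numeric illustration of the fixup (a sketch with hypothetical timings): a mode whose back porch is only 8 pixels gets stretched by 12, and the pixel clock is rescaled so the refresh rate is preserved.

    #include <stdio.h>

    int main(void)
    {
            int htotal = 2192, hsync_end = 2184;    /* 8-pixel back porch */
            int vtotal = 1125, clock = 148500;      /* kHz */
            int diff = htotal - hsync_end;
            int vrefresh = (clock * 1000) / (vtotal * htotal);  /* 60 Hz */

            htotal += 20 - diff;                    /* min back porch is 20 */
            clock = vtotal * htotal * vrefresh / 1000;
            /* prints: htotal 2204, clock 148770 kHz */
            printf("htotal %d, clock %d kHz\n", htotal, clock);
            return 0;
    }
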
+
+static int xilinx_drm_dp_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ u32 max_pclock = dp->config.max_pclock;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ if (max_pclock && mode->clock > max_pclock)
+ return MODE_CLOCK_HIGH;
+
+ rate = xilinx_drm_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+/**
+ * xilinx_drm_dp_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit and calculate all transfer-unit-size-related values.
+ * The calculation is based on the DP and IP core specifications.
+ */
+static void xilinx_drm_dp_mode_set_transfer_unit(struct xilinx_drm_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u32 tu = XILINX_DP_TX_DEF_TRANSFER_UNIT_SIZE;
+ u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
+
+ /* Use the max transfer unit size (default) */
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRANSFER_UNIT_SIZE, tu);
+
+ vid_kbytes = mode->clock * (dp->config.bpp / 8);
+ bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MIN_BYTES_PER_TU,
+ avg_bytes_per_tu / 1000);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_FRAC_BYTES_PER_TU,
+ avg_bytes_per_tu % 1000);
+
+ /* Configure the initial wait cycle based on transfer unit size */
+ if (tu < (avg_bytes_per_tu / 1000))
+ init_wait = 0;
+ else if ((avg_bytes_per_tu / 1000) <= 4)
+ init_wait = tu;
+ else
+ init_wait = tu - avg_bytes_per_tu / 1000;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_INIT_WAIT, init_wait);
+}
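
The arithmetic above in a standalone sketch, for a hypothetical 1080p60 link (148500 kHz, 24 bpp) on 2 lanes at 2.7 Gb/s; the small-TU corner cases of the init_wait selection are omitted here.

    #include <stdio.h>

    int main(void)
    {
            unsigned int tu = 64, clock = 148500, bpp = 24;
            unsigned int lanes = 2, bw = 270000;
            unsigned int vid_kbytes = clock * (bpp / 8);
            unsigned int avg = vid_kbytes * tu / (lanes * bw / 1000);

            /* prints: 52.800 bytes per TU, init_wait 12 */
            printf("%u.%03u bytes per TU, init_wait %u\n",
                   avg / 1000, avg % 1000, tu - avg / 1000);
            return 0;
    }
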
+
+/**
+ * xilinx_drm_dp_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
+static void xilinx_drm_dp_mode_set_stream(struct xilinx_drm_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 reg, wpl;
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HTOTAL,
+ mode->htotal);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VTOTAL,
+ mode->vtotal);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_POLARITY,
+ (!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
+ XILINX_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
+ (!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
+ XILINX_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HSWIDTH,
+ mode->hsync_end - mode->hsync_start);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VSWIDTH,
+ mode->vsync_end - mode->vsync_start);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HRES,
+ mode->hdisplay);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VRES,
+ mode->vdisplay);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_HSTART,
+ mode->htotal - mode->hsync_start);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_VSTART,
+ mode->vtotal - mode->vsync_start);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+
+	/* In synchronous mode, set the dividers */
+ if (dp->config.misc0 & XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC) {
+ reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_N_VID, reg);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_M_VID, mode->clock);
+ if (dp->aud_clk) {
+ int aud_rate = clk_get_rate(dp->aud_clk);
+
+ dev_dbg(dp->dev, "Audio rate: %d\n", aud_rate / 512);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_N_AUD,
+ reg);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_M_AUD,
+ aud_rate / 1000);
+ }
+ }
+
+	/* Only 2 channels are supported for now */
+ if (dp->aud_clk)
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_AUDIO_CHANNELS, 1);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_USER_PIXEL_WIDTH, 1);
+
+ /* Translate to the native 16 bit datapath based on IP core spec */
+ wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
+ reg = wpl + wpl % lane_cnt - lane_cnt;
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_USER_DATA_CNT_PER_LANE, reg);
+}
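
The datapath translation at the end, worked through as a sketch for a hypothetical 1920-pixel-wide, 24 bpp mode on 2 lanes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hdisplay = 1920, bpp = 24, lanes = 2;
            unsigned int wpl = (hdisplay * bpp + 15) / 16;  /* 16-bit words */

            /* prints: 2880 words/line, reg 2878 */
            printf("%u words/line, reg %u\n", wpl, wpl + wpl % lanes - lanes);
            return 0;
    }
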
+
+static void xilinx_drm_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ int ret;
+
+ ret = xilinx_drm_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0)
+ return;
+
+ xilinx_drm_dp_mode_set_stream(dp, adjusted_mode);
+ xilinx_drm_dp_mode_set_transfer_unit(dp, adjusted_mode);
+}
+
+static enum drm_connector_status
+xilinx_drm_dp_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ struct xilinx_drm_dp_link_config *link_config = &dp->link_config;
+ u32 state;
+ int ret;
+
+ state = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
+ if (state & XILINX_DP_TX_INTR_SIGNAL_STATE_HPD) {
+ dp->status = connector_status_connected;
+		ret = drm_dp_dpcd_read(&dp->aux, DP_DPCD_REV, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0)
+ goto disconnected;
+
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ dp->config.max_link_rate);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->config.max_lanes);
+ return dp->status;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return dp->status;
+}
+
+static int xilinx_drm_dp_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct xilinx_drm_dp *dp = to_dp(encoder);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
+}
+
+static struct drm_encoder_slave_funcs xilinx_drm_dp_encoder_funcs = {
+ .dpms = xilinx_drm_dp_dpms,
+ .save = xilinx_drm_dp_save,
+ .restore = xilinx_drm_dp_restore,
+ .mode_fixup = xilinx_drm_dp_mode_fixup,
+ .mode_valid = xilinx_drm_dp_mode_valid,
+ .mode_set = xilinx_drm_dp_mode_set,
+ .detect = xilinx_drm_dp_detect,
+ .get_modes = xilinx_drm_dp_get_modes,
+};
+
+static int xilinx_drm_dp_encoder_init(struct platform_device *pdev,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct xilinx_drm_dp *dp = platform_get_drvdata(pdev);
+
+ encoder->slave_priv = dp;
+ encoder->slave_funcs = &xilinx_drm_dp_encoder_funcs;
+
+ dp->encoder = &encoder->base;
+
+ return xilinx_drm_dp_init_aux(dp);
+}
+
+static irqreturn_t xilinx_drm_dp_irq_handler(int irq, void *data)
+{
+ struct xilinx_drm_dp *dp = (struct xilinx_drm_dp *)data;
+ u32 reg, status;
+
+ reg = dp->dp_sub ?
+ XILINX_DP_SUB_TX_INTR_STATUS : XILINX_DP_TX_INTR_STATUS;
+ status = xilinx_drm_readl(dp->iomem, reg);
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg(dp->dev, "underflow interrupt\n");
+ if (status & XILINX_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg(dp->dev, "overflow interrupt\n");
+
+ xilinx_drm_writel(dp->iomem, reg, status);
+
+ if (status & XILINX_DP_TX_INTR_VBLANK_START)
+ xilinx_drm_dp_sub_handle_vblank(dp->dp_sub);
+
+ if (status & XILINX_DP_TX_INTR_HPD_EVENT)
+ drm_helper_hpd_irq_event(dp->encoder->dev);
+
+	if (status & XILINX_DP_TX_INTR_HPD_IRQ) {
+		/* read from DP_SINK_COUNT through the link/align status
+		 * bytes; offsets below are relative to DP_SINK_COUNT (0x200)
+		 */
+		u8 sink_status[DP_LINK_STATUS_SIZE + 2];
+		int ret;
+
+		ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, sink_status,
+				       DP_LINK_STATUS_SIZE + 2);
+		if (ret < 0)
+			goto handled;
+
+		if (sink_status[4] & DP_LINK_STATUS_UPDATED ||
+		    !drm_dp_clock_recovery_ok(&sink_status[2],
+					      dp->mode.lane_cnt) ||
+		    !drm_dp_channel_eq_ok(&sink_status[2], dp->mode.lane_cnt))
+			xilinx_drm_dp_train_loop(dp);
+	}
+
+handled:
+ return IRQ_HANDLED;
+}
+
+static ssize_t
+xilinx_drm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct xilinx_drm_dp *dp = container_of(aux, struct xilinx_drm_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = xilinx_drm_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
+
+ for (i = 0; i < iter; i++) {
+ ret = xilinx_drm_dp_aux_cmd_submit(dp, msg->request,
+ msg->address, msg->buffer,
+ msg->size, &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no aux dev\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
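
The retry budget above, computed for the default parameters (a trivial sketch): 50 ms divided into 400 usec attempts gives 125 tries.

    #include <stdio.h>

    int main(void)
    {
            unsigned int iter = 50 * 1000 / 400;    /* default aux_timeout_ms */

            printf("%u attempts\n", iter ? iter : 1);  /* prints: 125 attempts */
            return 0;
    }
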
+
+static int xilinx_drm_dp_parse_of(struct xilinx_drm_dp *dp)
+{
+ struct device_node *node = dp->dev->of_node;
+ struct xilinx_drm_dp_config *config = &dp->config;
+ const char *string;
+ u32 num_colors, bpc;
+ bool sync;
+ int ret;
+
+ ret = of_property_read_string(node, "xlnx,dp-version", &string);
+ if (ret < 0) {
+ dev_err(dp->dev, "No DP version in DT\n");
+ return ret;
+ }
+
+ if (strcmp(string, "v1.1a") == 0) {
+ config->dp_version = DP_V1_1A;
+ } else if (strcmp(string, "v1.2") == 0) {
+ config->dp_version = DP_V1_2;
+ } else {
+ dev_err(dp->dev, "Invalid DP version in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-lanes", &config->max_lanes);
+ if (ret < 0) {
+ dev_err(dp->dev, "No lane count in DT\n");
+ return ret;
+ }
+
+ if (config->max_lanes != 1 && config->max_lanes != 2 &&
+ config->max_lanes != 4) {
+ dev_err(dp->dev, "Invalid max lanes in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-link-rate",
+ &config->max_link_rate);
+ if (ret < 0) {
+ dev_err(dp->dev, "No link rate in DT\n");
+ return ret;
+ }
+
+ if (config->max_link_rate != DP_REDUCED_BIT_RATE &&
+ config->max_link_rate != DP_HIGH_BIT_RATE &&
+ config->max_link_rate != DP_HIGH_BIT_RATE2) {
+ dev_err(dp->dev, "Invalid link rate in DT\n");
+ return -EINVAL;
+ }
+
+ config->enable_yonly = of_property_read_bool(node, "xlnx,enable-yonly");
+ config->enable_ycrcb = of_property_read_bool(node, "xlnx,enable-ycrcb");
+
+ sync = of_property_read_bool(node, "xlnx,sync");
+ if (sync)
+ config->misc0 |= XILINX_DP_TX_MAIN_STREAM_MISC0_SYNC;
+
+ ret = of_property_read_string(node, "xlnx,colormetry", &string);
+ if (ret < 0) {
+ dev_err(dp->dev, "No colormetry in DT\n");
+ return ret;
+ }
+
+ if (strcmp(string, "rgb") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_RGB;
+ num_colors = 3;
+ } else if (config->enable_ycrcb && strcmp(string, "ycrcb422") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_YCRCB_422;
+ num_colors = 2;
+ } else if (config->enable_ycrcb && strcmp(string, "ycrcb444") == 0) {
+ config->misc0 |= XILINX_DP_MISC0_YCRCB_444;
+ num_colors = 3;
+ } else if (config->enable_yonly && strcmp(string, "yonly") == 0) {
+ config->misc1 |= XILINX_DP_MISC1_Y_ONLY;
+ num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colormetry in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-bpc", &config->max_bpc);
+ if (ret < 0) {
+ dev_err(dp->dev, "No max bpc in DT\n");
+ return ret;
+ }
+
+ if (config->max_bpc != 8 && config->max_bpc != 10 &&
+ config->max_bpc != 12 && config->max_bpc != 16) {
+ dev_err(dp->dev, "Invalid max bpc in DT\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,bpc", &bpc);
+ if (ret < 0) {
+		dev_err(dp->dev, "No color depth (bpc) in DT\n");
+ return ret;
+ }
+
+ if (bpc > config->max_bpc) {
+		dev_err(dp->dev, "Invalid color depth (bpc) in DT\n");
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ config->misc0 |= XILINX_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= XILINX_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= XILINX_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= XILINX_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= XILINX_DP_MISC0_BPC_16;
+ break;
+ default:
+		dev_err(dp->dev, "Unsupported color depth in DT\n");
+ return -EINVAL;
+ }
+
+ config->bpp = num_colors * bpc;
+
+ of_property_read_u32(node, "xlnx,max-pclock-frequency",
+ &config->max_pclock);
+
+ return 0;
+}
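+
+/*
+ * Illustrative device tree fragment for the properties parsed above. The
+ * node name, unit address and values are examples only; the compatible
+ * string is the one matched by this driver:
+ *
+ *	dp_tx: dp@43c00000 {
+ *		compatible = "xlnx,v-dp";
+ *		xlnx,dp-version = "v1.2";
+ *		xlnx,max-lanes = <4>;
+ *		xlnx,max-link-rate = <540000>;
+ *		xlnx,colormetry = "rgb";
+ *		xlnx,max-bpc = <16>;
+ *		xlnx,bpc = <8>;
+ *	};
+ */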
+
+static int __maybe_unused xilinx_drm_dp_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_dp *dp = dev_get_drvdata(dev);
+
+ xilinx_drm_dp_exit_phy(dp);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_drm_dp_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_dp *dp = dev_get_drvdata(dev);
+
+ xilinx_drm_dp_init_phy(dp);
+ xilinx_drm_dp_init_aux(dp);
+ drm_helper_hpd_irq_event(dp->encoder->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xilinx_drm_dp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_dp_pm_suspend,
+ xilinx_drm_dp_pm_resume)
+};
+
+static int xilinx_drm_dp_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp *dp;
+ struct resource *res;
+ u32 version, i;
+ int irq, ret;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dpms = DRM_MODE_DPMS_OFF;
+ dp->status = connector_status_disconnected;
+ dp->dev = &pdev->dev;
+
+ ret = xilinx_drm_dp_parse_of(dp);
+ if (ret < 0)
+ return ret;
+
+ dp->aclk = devm_clk_get(dp->dev, "aclk");
+ if (IS_ERR(dp->aclk))
+ return PTR_ERR(dp->aclk);
+
+ ret = clk_prepare_enable(dp->aclk);
+ if (ret) {
+ dev_err(dp->dev, "failed to enable the aclk\n");
+ return ret;
+ }
+
+ dp->aud_clk = devm_clk_get(dp->dev, "aud_clk");
+ if (IS_ERR(dp->aud_clk)) {
+ ret = PTR_ERR(dp->aud_clk);
+ if (ret == -EPROBE_DEFER)
+ goto error_aclk;
+ dp->aud_clk = NULL;
+ dev_dbg(dp->dev, "failed to get the aud_clk:\n");
+ }
+
+ dp->dp_sub = xilinx_drm_dp_sub_of_get(pdev->dev.of_node);
+ if (IS_ERR(dp->dp_sub)) {
+ ret = PTR_ERR(dp->dp_sub);
+ goto error_aclk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dp->iomem = devm_ioremap_resource(dp->dev, res);
+ if (IS_ERR(dp->iomem)) {
+ ret = PTR_ERR(dp->iomem);
+ goto error_dp_sub;
+ }
+
+ platform_set_drvdata(pdev, dp);
+
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_POWER_DOWN,
+ XILINX_DP_TX_PHY_POWER_DOWN_ALL);
+ xilinx_drm_set(dp->iomem, XILINX_DP_TX_PHY_CONFIG,
+ XILINX_DP_TX_PHY_CONFIG_ALL_RESET);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_FORCE_SCRAMBLER_RESET, 1);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 0);
+
+ if (dp->dp_sub) {
+ for (i = 0; i < dp->config.max_lanes; i++) {
+ char phy_name[16];
+
+ snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
+ dp->phy[i] = devm_phy_get(dp->dev, phy_name);
+ if (IS_ERR(dp->phy[i])) {
+ dev_err(dp->dev, "failed to get phy lane\n");
+ ret = PTR_ERR(dp->phy[i]);
+ dp->phy[i] = NULL;
+ goto error_dp_sub;
+ }
+ }
+ }
+
+ ret = xilinx_drm_dp_init_phy(dp);
+ if (ret)
+ goto error_dp_sub;
+
+ dp->aux.name = "Xilinx DP AUX";
+ dp->aux.dev = dp->dev;
+ dp->aux.transfer = xilinx_drm_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ goto error;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error;
+ }
+
+ ret = devm_request_threaded_irq(dp->dev, irq, NULL,
+ xilinx_drm_dp_irq_handler, IRQF_ONESHOT,
+ dev_name(dp->dev), dp);
+ if (ret < 0)
+ goto error;
+
+ version = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_VERSION);
+
+ dev_info(dp->dev, "device found, version %u.%02x%x\n",
+ ((version & XILINX_DP_TX_VERSION_MAJOR_MASK) >>
+ XILINX_DP_TX_VERSION_MAJOR_SHIFT),
+ ((version & XILINX_DP_TX_VERSION_MINOR_MASK) >>
+ XILINX_DP_TX_VERSION_MINOR_SHIFT),
+ ((version & XILINX_DP_TX_VERSION_REVISION_MASK) >>
+ XILINX_DP_TX_VERSION_REVISION_SHIFT));
+
+ version = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_CORE_ID);
+ if (version & XILINX_DP_TX_CORE_ID_DIRECTION) {
+ dev_err(dp->dev, "Receiver is not supported\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ dev_info(dp->dev, "Display Port, version %u.%02x%02x (tx)\n",
+ ((version & XILINX_DP_TX_CORE_ID_MAJOR_MASK) >>
+ XILINX_DP_TX_CORE_ID_MAJOR_SHIFT),
+ ((version & XILINX_DP_TX_CORE_ID_MINOR_MASK) >>
+ XILINX_DP_TX_CORE_ID_MINOR_SHIFT),
+ ((version & XILINX_DP_TX_CORE_ID_REVISION_MASK) >>
+ XILINX_DP_TX_CORE_ID_REVISION_SHIFT));
+
+ pm_runtime_enable(dp->dev);
+
+ xilinx_dp_debugfs_init(dp);
+
+ return 0;
+
+error:
+ drm_dp_aux_unregister(&dp->aux);
+error_dp_sub:
+ xilinx_drm_dp_sub_put(dp->dp_sub);
+ xilinx_drm_dp_exit_phy(dp);
+error_aclk:
+ clk_disable_unprepare(dp->aclk);
+ return ret;
+}
+
+static int xilinx_drm_dp_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp *dp = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(dp->dev);
+ xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENABLE, 0);
+
+ drm_dp_aux_unregister(&dp->aux);
+ xilinx_drm_dp_exit_phy(dp);
+ xilinx_drm_dp_sub_put(dp->dp_sub);
+
+ if (dp->aud_clk && dp->aud_clk_enabled)
+ clk_disable_unprepare(dp->aud_clk);
+ clk_disable_unprepare(dp->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_drm_dp_of_match[] = {
+ { .compatible = "xlnx,v-dp", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_dp_of_match);
+
+static struct drm_platform_encoder_driver xilinx_drm_dp_driver = {
+ .platform_driver = {
+ .probe = xilinx_drm_dp_probe,
+ .remove = xilinx_drm_dp_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "xilinx-drm-dp",
+ .of_match_table = xilinx_drm_dp_of_match,
+ .pm = &xilinx_drm_dp_pm_ops,
+ },
+ },
+
+ .encoder_init = xilinx_drm_dp_encoder_init,
+};
+
+static int __init xilinx_drm_dp_init(void)
+{
+ return platform_driver_register(&xilinx_drm_dp_driver.platform_driver);
+}
+
+static void __exit xilinx_drm_dp_exit(void)
+{
+ platform_driver_unregister(&xilinx_drm_dp_driver.platform_driver);
+}
+
+module_init(xilinx_drm_dp_init);
+module_exit(xilinx_drm_dp_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS DiplayPort Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c
new file mode 100644
index 000000000000..e3a68b36fafb
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.c
@@ -0,0 +1,2265 @@
+/*
+ * DisplayPort subsystem support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+
+/* Blender registers */
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_0 0x0
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_1 0x4
+#define XILINX_DP_SUB_V_BLEND_BG_CLR_2 0x8
+#define XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
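+/* bit 0 enables the global alpha; bits 8:1 hold the 8-bit alpha value */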
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT 0x14
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL 0x18
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define XILINX_DP_SUB_V_BLEND_NUM_COEFF 9
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF0 0x44
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF1 0x48
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF2 0x4c
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF3 0x50
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF4 0x54
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF5 0x58
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF6 0x5c
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF7 0x60
+#define XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF8 0x64
+#define XILINX_DP_SUB_V_BLEND_NUM_OFFSET 3
+#define XILINX_DP_SUB_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define XILINX_DP_SUB_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define XILINX_DP_SUB_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define XILINX_DP_SUB_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define XILINX_DP_SUB_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF0 0x80
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF1 0x84
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF2 0x88
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF3 0x8c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF4 0x90
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF5 0x94
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF6 0x98
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF7 0x9c
+#define XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF8 0xa0
+#define XILINX_DP_SUB_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define XILINX_DP_SUB_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define XILINX_DP_SUB_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define XILINX_DP_SUB_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define XILINX_DP_SUB_AV_BUF_FMT 0x0
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_SHIFT 0
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define XILINX_DP_SUB_AV_BUF_NON_LIVE_LATENCY 0x8
+#define XILINX_DP_SUB_AV_BUF_CHBUF 0x10
+#define XILINX_DP_SUB_AV_BUF_CHBUF_EN BIT(0)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_FLUSH BIT(1)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define XILINX_DP_SUB_AV_BUF_STATUS 0x28
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL 0x2c
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EN BIT(0)
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define XILINX_DP_SUB_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define XILINX_DP_SUB_AV_BUF_STC_INIT_VALUE0 0x30
+#define XILINX_DP_SUB_AV_BUF_STC_INIT_VALUE1 0x34
+#define XILINX_DP_SUB_AV_BUF_STC_ADJ 0x38
+#define XILINX_DP_SUB_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define XILINX_DP_SUB_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define XILINX_DP_SUB_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define XILINX_DP_SUB_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define XILINX_DP_SUB_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define XILINX_DP_SUB_AV_BUF_STC_SNAPSHOT0 0x60
+#define XILINX_DP_SUB_AV_BUF_STC_SNAPSHOT1 0x64
+#define XILINX_DP_SUB_AV_BUF_OUTPUT 0x70
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_PL (0 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define XILINX_DP_SUB_AV_BUF_HCOUNT_VCOUNT_INT0 0x74
+#define XILINX_DP_SUB_AV_BUF_HCOUNT_VCOUNT_INT1 0x78
+#define XILINX_DP_SUB_AV_BUF_PATTERN_GEN_SELECT 0x100
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC 0x120
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS BIT(0)
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS BIT(1)
+#define XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING BIT(2)
+#define XILINX_DP_SUB_AV_BUF_SRST_REG 0x124
+#define XILINX_DP_SUB_AV_BUF_SRST_REG_VID_RST BIT(1)
+#define XILINX_DP_SUB_AV_BUF_AUDIO_CH_CONFIG 0x12c
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP0_SF 0x200
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP1_SF 0x204
+#define XILINX_DP_SUB_AV_BUF_GFX_COMP2_SF 0x208
+#define XILINX_DP_SUB_AV_BUF_VID_COMP0_SF 0x20c
+#define XILINX_DP_SUB_AV_BUF_VID_COMP1_SF 0x210
+#define XILINX_DP_SUB_AV_BUF_VID_COMP2_SF 0x214
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP0_SF 0x218
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP1_SF 0x21c
+#define XILINX_DP_SUB_AV_BUF_LIVE_VID_COMP2_SF 0x220
+#define XILINX_DP_SUB_AV_BUF_4BIT_SF 0x11111
+#define XILINX_DP_SUB_AV_BUF_5BIT_SF 0x10842
+#define XILINX_DP_SUB_AV_BUF_6BIT_SF 0x10410
+#define XILINX_DP_SUB_AV_BUF_8BIT_SF 0x10101
+#define XILINX_DP_SUB_AV_BUF_10BIT_SF 0x10040
+#define XILINX_DP_SUB_AV_BUF_NULL_SF 0
+#define XILINX_DP_SUB_AV_BUF_NUM_SF 3
+#define XILINX_DP_SUB_AV_BUF_LIVE_CB_CR_SWAP 0x224
+#define XILINX_DP_SUB_AV_BUF_PALETTE_MEMORY 0x400
+
+/* Audio registers */
+#define XILINX_DP_SUB_AUD_MIXER_VOLUME 0x0
+#define XILINX_DP_SUB_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
+#define XILINX_DP_SUB_AUD_MIXER_META_DATA 0x4
+#define XILINX_DP_SUB_AUD_CH_STATUS0 0x8
+#define XILINX_DP_SUB_AUD_CH_STATUS1 0xc
+#define XILINX_DP_SUB_AUD_CH_STATUS2 0x10
+#define XILINX_DP_SUB_AUD_CH_STATUS3 0x14
+#define XILINX_DP_SUB_AUD_CH_STATUS4 0x18
+#define XILINX_DP_SUB_AUD_CH_STATUS5 0x1c
+#define XILINX_DP_SUB_AUD_CH_A_DATA0 0x20
+#define XILINX_DP_SUB_AUD_CH_A_DATA1 0x24
+#define XILINX_DP_SUB_AUD_CH_A_DATA2 0x28
+#define XILINX_DP_SUB_AUD_CH_A_DATA3 0x2c
+#define XILINX_DP_SUB_AUD_CH_A_DATA4 0x30
+#define XILINX_DP_SUB_AUD_CH_A_DATA5 0x34
+#define XILINX_DP_SUB_AUD_CH_B_DATA0 0x38
+#define XILINX_DP_SUB_AUD_CH_B_DATA1 0x3c
+#define XILINX_DP_SUB_AUD_CH_B_DATA2 0x40
+#define XILINX_DP_SUB_AUD_CH_B_DATA3 0x44
+#define XILINX_DP_SUB_AUD_CH_B_DATA4 0x48
+#define XILINX_DP_SUB_AUD_CH_B_DATA5 0x4c
+#define XILINX_DP_SUB_AUD_SOFT_RESET 0xc00
+#define XILINX_DP_SUB_AUD_SOFT_RESET_AUD_SRST BIT(0)
+
+#define XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS 4
+#define XILINX_DP_SUB_AV_BUF_NUM_BUFFERS 6
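+/* the first 4 channel buffers serve video/graphics, the remaining 2 audio */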
+
+/**
+ * enum xilinx_drm_dp_sub_layer_type - Layer type
+ * @XILINX_DRM_DP_SUB_LAYER_VID: video layer
+ * @XILINX_DRM_DP_SUB_LAYER_GFX: graphics layer
+ */
+enum xilinx_drm_dp_sub_layer_type {
+ XILINX_DRM_DP_SUB_LAYER_VID,
+ XILINX_DRM_DP_SUB_LAYER_GFX
+};
+
+/**
+ * struct xilinx_drm_dp_sub_layer - DP subsystem layer
+ * @id: layer ID
+ * @offset: layer offset in the register space
+ * @avail: flag if layer is available
+ * @primary: flag for primary plane
+ * @enabled: flag if the layer is enabled
+ * @fmt: format descriptor
+ * @drm_fmts: array of supported DRM formats
+ * @num_fmts: number of supported DRM formats
+ * @w: width
+ * @h: height
+ * @other: other layer
+ */
+struct xilinx_drm_dp_sub_layer {
+ enum xilinx_drm_dp_sub_layer_type id;
+ u32 offset;
+ bool avail;
+ bool primary;
+ bool enabled;
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+ u32 *drm_fmts;
+ unsigned int num_fmts;
+ u32 w;
+ u32 h;
+ struct xilinx_drm_dp_sub_layer *other;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_blend - DP subsystem blender
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_blend {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_av_buf - DP subsystem av buffer manager
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_av_buf {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_aud - DP subsystem audio
+ * @base: pre-calculated base address
+ */
+struct xilinx_drm_dp_sub_aud {
+ void __iomem *base;
+};
+
+/**
+ * struct xilinx_drm_dp_sub - DP subsystem
+ * @dev: device structure
+ * @blend: blender device
+ * @av_buf: av buffer manager device
+ * @aud: audio device
+ * @layers: layers
+ * @list: entry in the global DP subsystem list
+ * @vblank_fn: vblank handler
+ * @vblank_data: vblank data to be used in vblank_fn
+ * @vid_clk_pl: flag if the clock is from PL
+ * @alpha: stored global alpha value
+ * @alpha_en: flag if the global alpha is enabled
+ */
+struct xilinx_drm_dp_sub {
+ struct device *dev;
+ struct xilinx_drm_dp_sub_blend blend;
+ struct xilinx_drm_dp_sub_av_buf av_buf;
+ struct xilinx_drm_dp_sub_aud aud;
+ struct xilinx_drm_dp_sub_layer layers[XILINX_DRM_DP_SUB_NUM_LAYERS];
+ struct list_head list;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+ bool vid_clk_pl;
+ u32 alpha;
+ bool alpha_en;
+};
+
+/**
+ * struct xilinx_drm_dp_sub_fmt - DP subsystem format mapping
+ * @drm_fmt: drm format
+ * @dp_sub_fmt: DP subsystem format
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for up to 3 color components
+ * @name: format name
+ */
+struct xilinx_drm_dp_sub_fmt {
+ u32 drm_fmt;
+ u32 dp_sub_fmt;
+ bool rgb;
+ bool swap;
+ bool chroma_sub;
+ u32 sf[3];
+ const char *name;
+};
+
+static LIST_HEAD(xilinx_drm_dp_sub_list);
+static DEFINE_MUTEX(xilinx_drm_dp_sub_lock);
+
+#ifdef CONFIG_DRM_XILINX_DP_SUB_DEBUG_FS
+#define XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL 0xFFF
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+/* Keep enum xilinx_dp_sub_testcases in sync with dp_sub_debugfs_reqs[] */
+enum xilinx_dp_sub_testcases {
+ DP_SUB_TC_BG_COLOR,
+ DP_SUB_TC_OUTPUT_FMT,
+ DP_SUB_TC_NONE
+};
+
+struct xilinx_dp_sub_debugfs {
+ enum xilinx_dp_sub_testcases testcase;
+ u16 r_value;
+ u16 g_value;
+ u16 b_value;
+ u32 output_fmt;
+ struct xilinx_drm_dp_sub *xilinx_dp_sub;
+};
+
+static struct xilinx_dp_sub_debugfs dp_sub_debugfs;
+struct xilinx_dp_sub_debugfs_request {
+ const char *req;
+ enum xilinx_dp_sub_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static s64 xilinx_dp_sub_debugfs_argument_value(char *arg)
+{
+ s64 value;
+
+ if (!arg)
+ return -1;
+
+ if (!kstrtos64(arg, 0, &value))
+ return value;
+
+ return -1;
+}
+
+static void
+xilinx_dp_sub_debugfs_update_v_blend(u16 *sdtv_coeffs, u32 *full_range_offsets)
+{
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+ u32 offset, i;
+
+ /* Hardcoded SDTV coefficients; these could be made runtime configurable */
+ offset = XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ sdtv_coeffs[i]);
+
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ full_range_offsets[i]);
+}
+
+static void xilinx_dp_sub_debugfs_output_format(u32 fmt)
+{
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT, fmt);
+
+ if (fmt != XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+
+ xilinx_dp_sub_debugfs_update_v_blend(sdtv_coeffs,
+ full_range_offsets);
+ } else {
+ /* For RGB, restore the reset (identity) values */
+ u16 sdtv_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u32 full_range_offsets[] = { 0x0, 0x0, 0x0 };
+
+ xilinx_dp_sub_debugfs_update_v_blend(sdtv_coeffs,
+ full_range_offsets);
+ }
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_background_color_write(char **dp_sub_test_arg)
+{
+ char *r_color, *g_color, *b_color;
+ s64 r_val, g_val, b_val;
+
+ r_color = strsep(dp_sub_test_arg, " ");
+ g_color = strsep(dp_sub_test_arg, " ");
+ b_color = strsep(dp_sub_test_arg, " ");
+
+ /* char * to int conversion */
+ r_val = xilinx_dp_sub_debugfs_argument_value(r_color);
+ g_val = xilinx_dp_sub_debugfs_argument_value(g_color);
+ b_val = xilinx_dp_sub_debugfs_argument_value(b_color);
+
+ if (!(IN_RANGE(r_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL) &&
+ IN_RANGE(g_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL) &&
+ IN_RANGE(b_val, 0, XILINX_DP_SUB_DEBUGFS_MAX_BG_COLOR_VAL)))
+ return -EINVAL;
+
+ dp_sub_debugfs.r_value = r_val;
+ dp_sub_debugfs.g_value = g_val;
+ dp_sub_debugfs.b_value = b_val;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_BG_COLOR;
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_output_display_format_write(char **dp_sub_test_arg)
+{
+ char *output_format;
+ struct xilinx_drm_dp_sub *dp_sub = dp_sub_debugfs.xilinx_dp_sub;
+ u32 fmt;
+
+ /* Parse the requested output format from the user string */
+ output_format = strsep(dp_sub_test_arg, " ");
+ if (strncmp(output_format, "rgb", 3) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB;
+ } else if (strncmp(output_format, "ycbcr444", 8) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444;
+ } else if (strncmp(output_format, "ycbcr422", 8) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422;
+ fmt |= XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE;
+ } else if (strncmp(output_format, "yonly", 5) == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY;
+ } else {
+ dev_err(dp_sub->dev, "Invalid output format\n");
+ return -EINVAL;
+ }
+
+ dp_sub_debugfs.output_fmt =
+ xilinx_drm_readl(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT);
+
+ xilinx_dp_sub_debugfs_output_format(fmt);
+ dp_sub_debugfs.testcase = DP_SUB_TC_OUTPUT_FMT;
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_output_display_format_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ xilinx_dp_sub_debugfs_output_format(dp_sub_debugfs.output_fmt);
+
+ out_str_len = strlen("Success");
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE - 1,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len + 1, "%s", "Success");
+
+ return 0;
+}
+
+static ssize_t
+xilinx_dp_sub_debugfs_background_color_read(char **kern_buff)
+{
+ size_t out_str_len;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ dp_sub_debugfs.r_value = 0;
+ dp_sub_debugfs.g_value = 0;
+ dp_sub_debugfs.b_value = 0;
+
+ out_str_len = strlen("Success");
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE - 1,
+ out_str_len);
+ snprintf(*kern_buff, out_str_len + 1, "%s", "Success");
+
+ return 0;
+}
+
+/* Keep enum xilinx_dp_sub_testcases in sync with dp_sub_debugfs_reqs[] */
+static struct xilinx_dp_sub_debugfs_request dp_sub_debugfs_reqs[] = {
+ {"BACKGROUND_COLOR", DP_SUB_TC_BG_COLOR,
+ xilinx_dp_sub_debugfs_background_color_read,
+ xilinx_dp_sub_debugfs_background_color_write},
+ {"OUTPUT_DISPLAY_FORMAT", DP_SUB_TC_OUTPUT_FMT,
+ xilinx_dp_sub_debugfs_output_display_format_read,
+ xilinx_dp_sub_debugfs_output_display_format_write},
+};
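+
+/*
+ * Example user space usage (hypothetical session; assumes debugfs is
+ * mounted at /sys/kernel/debug):
+ *
+ *	echo "BACKGROUND_COLOR 0xfff 0x0 0x0" > /sys/kernel/debug/dp_sub/testcase
+ *	cat /sys/kernel/debug/dp_sub/testcase
+ */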
+
+static ssize_t
+xilinx_dp_sub_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff, *dp_sub_test_req, *kern_buff_start;
+ int ret;
+ unsigned int i;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ if (dp_sub_debugfs.testcase != DP_SUB_TC_NONE)
+ return -EBUSY;
+
+ /* +1 keeps the buffer NUL-terminated for strsep() even if the user fills it */
+ kern_buff = kzalloc(size + 1, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0) {
+ kfree(kern_buff_start);
+ return ret;
+ }
+
+ /* Read the testcase name and arguments from the user request */
+ dp_sub_test_req = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dp_sub_debugfs_reqs); i++) {
+ if (!strcasecmp(dp_sub_test_req, dp_sub_debugfs_reqs[i].req))
+ if (!dp_sub_debugfs_reqs[i].write_handler(&kern_buff)) {
+ kfree(kern_buff_start);
+ return size;
+ }
+ }
+ kfree(kern_buff_start);
+ return -EINVAL;
+}
+
+static ssize_t xilinx_dp_sub_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *kern_buff = NULL;
+ size_t kern_buff_len, out_str_len;
+ int ret;
+
+ if (size <= 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ kern_buff = kzalloc(XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ return -ENOMEM;
+ }
+
+ if (dp_sub_debugfs.testcase == DP_SUB_TC_NONE) {
+ out_str_len = strlen("No testcase executed");
+ out_str_len = min_t(size_t, XILINX_DP_SUB_DEBUGFS_READ_MAX_SIZE - 1,
+ out_str_len);
+ snprintf(kern_buff, out_str_len + 1, "%s", "No testcase executed");
+ } else {
+ ret = dp_sub_debugfs_reqs[dp_sub_debugfs.testcase].read_handler(
+ &kern_buff);
+ if (ret) {
+ kfree(kern_buff);
+ return ret;
+ }
+ }
+
+ kern_buff_len = strlen(kern_buff);
+ size = min(size, kern_buff_len);
+
+ ret = copy_to_user(buf, kern_buff, size);
+
+ kfree(kern_buff);
+ if (ret)
+ return -EFAULT;
+
+ *pos = size + 1;
+ return size;
+}
+
+static const struct file_operations fops_xilinx_dp_sub_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dp_sub_debugfs_read,
+ .write = xilinx_dp_sub_debugfs_write,
+};
+
+static int xilinx_dp_sub_debugfs_init(struct xilinx_drm_dp_sub *dp_sub)
+{
+ int err;
+ struct dentry *xilinx_dp_sub_debugfs_dir, *xilinx_dp_sub_debugfs_file;
+
+ dp_sub_debugfs.testcase = DP_SUB_TC_NONE;
+ dp_sub_debugfs.xilinx_dp_sub = dp_sub;
+
+ xilinx_dp_sub_debugfs_dir = debugfs_create_dir("dp_sub", NULL);
+ if (!xilinx_dp_sub_debugfs_dir) {
+ dev_err(dp_sub->dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dp_sub_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dp_sub_debugfs_dir, NULL,
+ &fops_xilinx_dp_sub_dbgfs);
+ if (!xilinx_dp_sub_debugfs_file) {
+ dev_err(dp_sub->dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dp_sub_debugfs_dir);
+ xilinx_dp_sub_debugfs_dir = NULL;
+ return err;
+}
+
+static void xilinx_drm_dp_sub_debugfs_bg_color(struct xilinx_drm_dp_sub *dp_sub)
+{
+ if (dp_sub_debugfs.testcase == DP_SUB_TC_BG_COLOR) {
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_0,
+ dp_sub_debugfs.r_value);
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_1,
+ dp_sub_debugfs.g_value);
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_BG_CLR_2,
+ dp_sub_debugfs.b_value);
+ }
+}
+#else
+static int xilinx_dp_sub_debugfs_init(struct xilinx_drm_dp_sub *dp_sub)
+{
+ return 0;
+}
+
+static void xilinx_drm_dp_sub_debugfs_bg_color(struct xilinx_drm_dp_sub *dp_sub)
+{
+}
+#endif /* CONFIG_DRM_XILINX_DP_SUB_DEBUG_FS */
+
+/* Blender functions */
+
+/**
+ * xilinx_drm_dp_sub_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Enable a layer @layer.
+ */
+static void
+xilinx_drm_dp_sub_blend_layer_enable(struct xilinx_drm_dp_sub_blend *blend,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg, offset, i, s0, s1;
+ u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+ 0x1000, 0x7483, 0x7a7f,
+ 0x1000, 0x0, 0x1c5a };
+ u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u16 *coeffs;
+ u32 offsets[] = { 0x0, 0x1800, 0x1800 };
+
+ reg = layer->fmt->rgb ? XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_RGB : 0;
+ reg |= layer->fmt->chroma_sub ?
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL_EN_US : 0;
+
+ xilinx_drm_writel(blend->base,
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL + layer->offset,
+ reg);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ offset = XILINX_DP_SUB_V_BLEND_IN1CSC_COEFF0;
+ else
+ offset = XILINX_DP_SUB_V_BLEND_IN2CSC_COEFF0;
+
+ if (!layer->fmt->rgb) {
+ coeffs = sdtv_coeffs;
+ s0 = 1;
+ s1 = 2;
+ } else {
+ coeffs = swap_coeffs;
+ s0 = 0;
+ s1 = 2;
+
+ /* No offset for RGB formats */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ offsets[i] = 0;
+ }
+
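+ /* XOR-swap the coefficient columns for component-swapped formats (R/B or U/V) */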
+ if (layer->fmt->swap) {
+ for (i = 0; i < 3; i++) {
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ coeffs[i * 3 + s1] ^= coeffs[i * 3 + s0];
+ coeffs[i * 3 + s0] ^= coeffs[i * 3 + s1];
+ }
+ }
+
+ /* Program the CSC coefficients; these could be made runtime configurable */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(blend->base, offset + i * 4, coeffs[i]);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_IN1CSC_OFFSET;
+ else
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+ /* Program the CSC offsets; these could be made runtime configurable */
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Disable a layer @layer.
+ */
+static void
+xilinx_drm_dp_sub_blend_layer_disable(struct xilinx_drm_dp_sub_blend *blend,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_writel(blend->base,
+ XILINX_DP_SUB_V_BLEND_LAYER_CONTROL + layer->offset,
+ 0);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_bg_color(struct xilinx_drm_dp_sub_blend *blend,
+ u32 c0, u32 c1, u32 c2)
+{
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_0, c0);
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_1, c1);
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_BG_CLR_2, c2);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Set the alpha for blending.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_alpha(struct xilinx_drm_dp_sub_blend *blend,
+ u32 alpha)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA);
+ reg &= ~XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA_MASK;
+ reg |= alpha << 1;
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA,
+ reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable/disable the global alpha blending based on @enable.
+ */
+static void
+xilinx_drm_dp_sub_blend_enable_alpha(struct xilinx_drm_dp_sub_blend *blend,
+ bool enable)
+{
+ if (enable)
+ xilinx_drm_set(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+ else
+ xilinx_drm_clr(blend->base,
+ XILINX_DP_SUB_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+static const struct xilinx_drm_dp_sub_fmt blend_output_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv444",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .dp_sub_fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "yuv422",
+ }, {
+ }
+};
+
+/**
+ * xilinx_drm_dp_sub_blend_set_output_fmt - Set the output format
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt.
+ */
+static void
+xilinx_drm_dp_sub_blend_set_output_fmt(struct xilinx_drm_dp_sub_blend *blend,
+ u32 fmt)
+{
+ xilinx_drm_writel(blend->base, XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT,
+ fmt);
+}
+
+/* AV buffer manager functions */
+
+static const struct xilinx_drm_dp_sub_fmt av_buf_vid_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "vyuy",
+ }, {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "uyvy",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuyv",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvyu",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv422",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu422",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv444",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu444",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv16",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv61",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgr888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "xbgr8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "xrgb8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "xbgr2101010",
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB2101010,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "xrgb2101010",
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yuv420",
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "yvu420",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv12",
+ }, {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "nv21",
+ }, {
+ .drm_fmt = DRM_FORMAT_XV15,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "yuv42010b",
+ }, {
+ .drm_fmt = DRM_FORMAT_XV20,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_VID_YV16CI_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_10BIT_SF,
+ .name = "yuv42210b",
+ }
+};
+
+static const struct xilinx_drm_dp_sub_fmt av_buf_gfx_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "abgr8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "argb8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgba8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgra8888",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "bgr888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_BGR888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_8BIT_SF,
+ .name = "rgb888",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "rgba5551",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "bgra5551",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "rgba4444",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_4BIT_SF,
+ .name = "bgra4444",
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_6BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "rgb565",
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .dp_sub_fmt = XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .sf[1] = XILINX_DP_SUB_AV_BUF_6BIT_SF,
+ .sf[2] = XILINX_DP_SUB_AV_BUF_5BIT_SF,
+ .name = "bgr565",
+ }
+};
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: formats
+ *
+ * Set the av buffer manager format to @fmt. @fmt should have valid values
+ * for both the video and graphics layers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_set_fmt(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ u32 fmt)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT, fmt);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (video and graphics) of the av buffer manager.
+ *
+ * Return: value of XILINX_DP_SUB_AV_BUF_FMT register.
+ */
+static u32
+xilinx_drm_dp_sub_av_buf_get_fmt(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ return xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock is from ps
+ *
+ * Set the video clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_vid_clock_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool from_ps)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (from_ps)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_FROM_PS;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Set the video timing source based on @internal. It can come externally or
+ * be generated internally.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_vid_timing_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool internal)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (internal)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock is from PS
+ *
+ * Set the audio clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void xilinx_drm_dp_sub_av_buf_set_aud_clock_src(
+ struct xilinx_drm_dp_sub_av_buf *av_buf, bool from_ps)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC);
+ if (from_ps)
+ reg |= XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ else
+ reg &= ~XILINX_DP_SUB_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable all (video and audio) buffers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_buf(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ reg |= XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_MAX <<
+ XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+
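+ /* the remaining channel buffers carry audio and use a shorter burst length */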
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ reg |= XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+ XILINX_DP_SUB_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (; i < XILINX_DP_SUB_AV_BUF_NUM_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Disable all (video and audio) buffers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_buf(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = XILINX_DP_SUB_AV_BUF_CHBUF_FLUSH & ~XILINX_DP_SUB_AV_BUF_CHBUF_EN;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_BUFFERS; i++)
+ xilinx_drm_writel(av_buf->base,
+ XILINX_DP_SUB_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Enable all audio buffers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_aud(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MEM;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_SRST_REG,
+ XILINX_DP_SUB_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable all audio buffers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_aud(struct xilinx_drm_dp_sub_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_AUD1_DISABLE;
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_AUD2_EN;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ *
+ * Enable the video/graphics buffer for @layer.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_enable_vid(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MEM;
+ } else {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MEM;
+ }
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer for @layer.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_disable_vid(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT);
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID1_NONE;
+ } else {
+ reg &= ~XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= XILINX_DP_SUB_AV_BUF_OUTPUT_VID2_DISABLE;
+ }
+
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_init_fmts - Initialize the layer formats
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize formats of both video and graphics layers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_init_fmts(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt,
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt)
+{
+ u32 reg;
+
+ reg = vid_fmt->dp_sub_fmt;
+ reg |= gfx_fmt->dp_sub_fmt;
+ xilinx_drm_writel(av_buf->base, XILINX_DP_SUB_AV_BUF_FMT, reg);
+}
+
+/**
+ * xilinx_drm_dp_sub_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize scaling factors for both video and graphics layers.
+ */
+static void
+xilinx_drm_dp_sub_av_buf_init_sf(struct xilinx_drm_dp_sub_av_buf *av_buf,
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt,
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt)
+{
+ unsigned int i;
+ int offset;
+
+ if (gfx_fmt) {
+ offset = XILINX_DP_SUB_AV_BUF_GFX_COMP0_SF;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_SF; i++)
+ xilinx_drm_writel(av_buf->base, offset + i * 4,
+ gfx_fmt->sf[i]);
+ }
+
+ if (vid_fmt) {
+ offset = XILINX_DP_SUB_AV_BUF_VID_COMP0_SF;
+ for (i = 0; i < XILINX_DP_SUB_AV_BUF_NUM_SF; i++)
+ xilinx_drm_writel(av_buf->base, offset + i * 4,
+ vid_fmt->sf[i]);
+ }
+}
+
+/* Audio functions */
+
+/**
+ * xilinx_drm_dp_sub_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with the default mixer volume. De-asserting the
+ * soft reset brings the audio logic out of reset.
+ */
+static void xilinx_drm_dp_sub_aud_init(struct xilinx_drm_dp_sub_aud *aud)
+{
+ /* Clear the audio soft reset register as it's a non-reset flop */
+ xilinx_drm_writel(aud->base, XILINX_DP_SUB_AUD_SOFT_RESET, 0);
+ xilinx_drm_writel(aud->base, XILINX_DP_SUB_AUD_MIXER_VOLUME,
+ XILINX_DP_SUB_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * xilinx_drm_dp_sub_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio in reset.
+ */
+static void xilinx_drm_dp_sub_aud_deinit(struct xilinx_drm_dp_sub_aud *aud)
+{
+ xilinx_drm_set(aud->base, XILINX_DP_SUB_AUD_SOFT_RESET,
+ XILINX_DP_SUB_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/* DP subsystem layer functions */
+
+/**
+ * xilinx_drm_dp_sub_layer_check_size - Verify width and height for the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The DP subsystem requires both layers to have identical dimensions. This
+ * function verifies that @width and @height match the other enabled layer,
+ * and stores them in @layer.
+ *
+ * Return: 0 on success, or -EINVAL if the width and/or height is invalid.
+ */
+int xilinx_drm_dp_sub_layer_check_size(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 width, u32 height)
+{
+ struct xilinx_drm_dp_sub_layer *other = layer->other;
+
+ if (other->enabled && (other->w != width || other->h != height)) {
+ dev_err(dp_sub->dev, "Layer width:height must be %d:%d\n",
+ other->w, other->h);
+ return -EINVAL;
+ }
+
+ layer->w = width;
+ layer->h = height;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_check_size);
+
+/**
+ * xilinx_drm_dp_sub_map_fmt - Find the DP subsystem format for a given DRM format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Search a DP subsystem format corresponding to the given DRM format @drm_fmt,
+ * and return the format descriptor which contains the DP subsystem format
+ * value.
+ *
+ * Return: a DP subsystem format descriptor on success, or NULL.
+ */
+static const struct xilinx_drm_dp_sub_fmt *
+xilinx_drm_dp_sub_map_fmt(const struct xilinx_drm_dp_sub_fmt fmts[],
+ unsigned int size, u32 drm_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (fmts[i].drm_fmt == drm_fmt)
+ return &fmts[i];
+
+ return NULL;
+}
+
+/**
+ * xilinx_drm_dp_sub_layer_set_fmt - Set the format of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Set the format of the given layer to @drm_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+int xilinx_drm_dp_sub_layer_set_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 drm_fmt)
+{
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt = NULL, *gfx_fmt = NULL;
+ u32 size, fmts, mask;
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID) {
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ mask = ~XILINX_DP_SUB_AV_BUF_FMT_NL_VID_MASK;
+ fmt = xilinx_drm_dp_sub_map_fmt(av_buf_vid_fmts, size, drm_fmt);
+ vid_fmt = fmt;
+ } else {
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ mask = ~XILINX_DP_SUB_AV_BUF_FMT_NL_GFX_MASK;
+ fmt = xilinx_drm_dp_sub_map_fmt(av_buf_gfx_fmts, size, drm_fmt);
+ gfx_fmt = fmt;
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
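+	/* Read-modify-write: update only this layer's bits in the shared
+	 * format register, leaving the other layer's format intact.
+	 */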
+ fmts = xilinx_drm_dp_sub_av_buf_get_fmt(&dp_sub->av_buf);
+ fmts &= mask;
+ fmts |= fmt->dp_sub_fmt;
+ xilinx_drm_dp_sub_av_buf_set_fmt(&dp_sub->av_buf, fmts);
+ xilinx_drm_dp_sub_av_buf_init_sf(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+
+ layer->fmt = fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_set_fmt);
+
+/**
+ * xilinx_drm_dp_sub_layer_get_fmt - Get the format of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to get the format from
+ *
+ * Get the format of the given layer.
+ *
+ * Return: DRM format of the layer
+ */
+u32 xilinx_drm_dp_sub_layer_get_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ return layer->fmt->drm_fmt;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get_fmt);
+
+/**
+ * xilinx_drm_dp_sub_layer_get_fmts - Get the supported DRM formats of the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to get the formats from
+ * @drm_fmts: output pointer for the array of supported DRM formats
+ * @num_fmts: pointer to number of returned DRM formats
+ *
+ * Get the supported DRM formats of the given layer.
+ */
+void xilinx_drm_dp_sub_layer_get_fmts(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 **drm_fmts,
+ unsigned int *num_fmts)
+{
+ *drm_fmts = layer->drm_fmts;
+ *num_fmts = layer->num_fmts;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get_fmts);
+
+/**
+ * xilinx_drm_dp_sub_layer_enable - Enable the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to enable
+ *
+ * Enable the layer @layer.
+ */
+void xilinx_drm_dp_sub_layer_enable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_dp_sub_av_buf_enable_vid(&dp_sub->av_buf, layer);
+ xilinx_drm_dp_sub_blend_layer_enable(&dp_sub->blend, layer);
+ layer->enabled = true;
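+	/*
+	 * If both layers are enabled, apply the user-configured global alpha;
+	 * otherwise force the blender to show only the active layer (alpha 0
+	 * for video only, max alpha for graphics only).
+	 */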
+ if (layer->other->enabled) {
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend,
+ dp_sub->alpha);
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend,
+ dp_sub->alpha_en);
+ } else {
+ u32 alpha;
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ alpha = 0;
+ else
+ alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, true);
+ }
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_enable);
+
+/**
+ * xilinx_drm_dp_sub_layer_disable - Disable the layer
+ * @dp_sub: DP subsystem
+ * @layer: layer to disable
+ *
+ * Disable the layer @layer.
+ */
+void xilinx_drm_dp_sub_layer_disable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ xilinx_drm_dp_sub_av_buf_disable_vid(&dp_sub->av_buf, layer);
+ xilinx_drm_dp_sub_blend_layer_disable(&dp_sub->blend, layer);
+ layer->enabled = false;
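+	/*
+	 * If the other layer remains enabled, force the blender to show it
+	 * alone: max alpha when graphics remains, alpha 0 when video remains.
+	 */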
+ if (layer->other->enabled) {
+ u32 alpha;
+
+ if (layer->id == XILINX_DRM_DP_SUB_LAYER_VID)
+ alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+ else
+ alpha = 0;
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, true);
+ }
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_disable);
+
+/**
+ * xilinx_drm_dp_sub_layer_get - Get the DP subsystem layer
+ * @dp_sub: DP subsystem
+ * @primary: flag to indicate the primary plane
+ *
+ * Check if there's any available layer based on the flag @primary, and return
+ * the found layer.
+ *
+ * Return: a DP subsystem layer on success, or -ENODEV error pointer.
+ */
+struct xilinx_drm_dp_sub_layer *
+xilinx_drm_dp_sub_layer_get(struct xilinx_drm_dp_sub *dp_sub, bool primary)
+{
+ struct xilinx_drm_dp_sub_layer *layer = NULL;
+ unsigned int i;
+
+ for (i = 0; i < XILINX_DRM_DP_SUB_NUM_LAYERS; i++) {
+ if (dp_sub->layers[i].primary == primary) {
+ layer = &dp_sub->layers[i];
+ break;
+ }
+ }
+
+ if (!layer || !layer->avail)
+ return ERR_PTR(-ENODEV);
+
+ return layer;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_get);
+
+/**
+ * xilinx_drm_dp_sub_layer_put - Put the DP subsystem layer
+ * @dp_sub: DP subsystem
+ * @layer: DP subsystem layer
+ *
+ * Return the DP subsystem layer @layer when it's no longer used.
+ */
+void xilinx_drm_dp_sub_layer_put(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer)
+{
+ layer->avail = true;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_layer_put);
+
+/* DP subsystem functions */
+
+/**
+ * xilinx_drm_dp_sub_set_output_fmt - Set the output format
+ * @dp_sub: DP subsystem
+ * @drm_fmt: DRM format to set
+ *
+ * Set the output format of the DP subsystem.
+ *
+ * Return: 0 on success, or -EINVAL if @drm_fmt is not supported for output.
+ */
+int xilinx_drm_dp_sub_set_output_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ u32 drm_fmt)
+{
+ const struct xilinx_drm_dp_sub_fmt *fmt;
+
+ fmt = xilinx_drm_dp_sub_map_fmt(blend_output_fmts,
+ ARRAY_SIZE(blend_output_fmts), drm_fmt);
+ if (!fmt)
+ return -EINVAL;
+
+ xilinx_drm_dp_sub_blend_set_output_fmt(&dp_sub->blend, fmt->dp_sub_fmt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_output_fmt);
+
+/**
+ * xilinx_drm_dp_sub_set_bg_color - Set the background color
+ * @dp_sub: DP subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color with given color components (@c0, @c1, @c2).
+ */
+void xilinx_drm_dp_sub_set_bg_color(struct xilinx_drm_dp_sub *dp_sub,
+ u32 c0, u32 c1, u32 c2)
+{
+ xilinx_drm_dp_sub_blend_set_bg_color(&dp_sub->blend, c0, c1, c2);
+ xilinx_drm_dp_sub_debugfs_bg_color(dp_sub);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_bg_color);
+
+/**
+ * xilinx_drm_dp_sub_set_alpha - Set the alpha value
+ * @dp_sub: DP subsystem
+ * @alpha: alpha value to set
+ *
+ * Set the alpha value for blending.
+ */
+void xilinx_drm_dp_sub_set_alpha(struct xilinx_drm_dp_sub *dp_sub, u32 alpha)
+{
+ dp_sub->alpha = alpha;
+ if (dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].enabled &&
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].enabled)
+ xilinx_drm_dp_sub_blend_set_alpha(&dp_sub->blend, alpha);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_set_alpha);
+
+/**
+ * xilinx_drm_dp_sub_enable_alpha - Enable/disable the global alpha blending
+ * @dp_sub: DP subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable or disable the global alpha blending.
+ */
+void
+xilinx_drm_dp_sub_enable_alpha(struct xilinx_drm_dp_sub *dp_sub, bool enable)
+{
+ dp_sub->alpha_en = enable;
+ if (dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].enabled &&
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].enabled)
+ xilinx_drm_dp_sub_blend_enable_alpha(&dp_sub->blend, enable);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable_alpha);
+
+/**
+ * xilinx_drm_dp_sub_handle_vblank - Vblank handling wrapper
+ * @dp_sub: DP subsystem
+ *
+ * Trigger the registered vblank handler. This function is supposed to be
+ * called in the actual vblank handler.
+ */
+void xilinx_drm_dp_sub_handle_vblank(struct xilinx_drm_dp_sub *dp_sub)
+{
+ if (dp_sub->vblank_fn)
+ dp_sub->vblank_fn(dp_sub->vblank_data);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_handle_vblank);
+
+/**
+ * xilinx_drm_dp_sub_enable_vblank - Enable the vblank handling
+ * @dp_sub: DP subsystem
+ * @vblank_fn: callback to be called on vblank event
+ * @vblank_data: data to be used in @vblank_fn
+ *
+ * This function registers the vblank handler; the handler will be triggered
+ * on subsequent vblank events.
+ */
+void xilinx_drm_dp_sub_enable_vblank(struct xilinx_drm_dp_sub *dp_sub,
+ void (*vblank_fn)(void *),
+ void *vblank_data)
+{
+ dp_sub->vblank_fn = vblank_fn;
+ dp_sub->vblank_data = vblank_data;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable_vblank);
+
+/**
+ * xilinx_drm_dp_sub_disable_vblank - Disable the vblank handling
+ * @dp_sub: DP subsystem
+ *
+ * Disable the vblank handler. The vblank handler and data are unregistered.
+ */
+void xilinx_drm_dp_sub_disable_vblank(struct xilinx_drm_dp_sub *dp_sub)
+{
+ dp_sub->vblank_fn = NULL;
+ dp_sub->vblank_data = NULL;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_disable_vblank);
+
+/**
+ * xilinx_drm_dp_sub_enable - Enable the DP subsystem
+ * @dp_sub: DP subsystem
+ *
+ * Enable the DP subsystem.
+ */
+void xilinx_drm_dp_sub_enable(struct xilinx_drm_dp_sub *dp_sub)
+{
+ const struct xilinx_drm_dp_sub_fmt *vid_fmt;
+ const struct xilinx_drm_dp_sub_fmt *gfx_fmt;
+
+ vid_fmt = dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].fmt;
+ gfx_fmt = dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].fmt;
+ xilinx_drm_dp_sub_av_buf_enable(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_init_fmts(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+ xilinx_drm_dp_sub_av_buf_init_sf(&dp_sub->av_buf, vid_fmt, gfx_fmt);
+ xilinx_drm_dp_sub_av_buf_set_vid_clock_src(&dp_sub->av_buf,
+ !dp_sub->vid_clk_pl);
+ xilinx_drm_dp_sub_av_buf_set_vid_timing_src(&dp_sub->av_buf, true);
+ xilinx_drm_dp_sub_av_buf_set_aud_clock_src(&dp_sub->av_buf, true);
+ xilinx_drm_dp_sub_av_buf_enable_buf(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_enable_aud(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_aud_init(&dp_sub->aud);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_enable);
+
+/**
+ * xilinx_drm_dp_sub_disable - Disable the DP subsystem
+ * @dp_sub: DP subsystem
+ *
+ * Disable the DP subsystem.
+ */
+void xilinx_drm_dp_sub_disable(struct xilinx_drm_dp_sub *dp_sub)
+{
+ xilinx_drm_dp_sub_aud_deinit(&dp_sub->aud);
+ xilinx_drm_dp_sub_av_buf_disable_aud(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_disable_buf(&dp_sub->av_buf);
+ xilinx_drm_dp_sub_av_buf_disable(&dp_sub->av_buf);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_disable);
+
+/* DP subsystem initialization functions */
+
+/**
+ * xilinx_drm_dp_sub_of_get - Get the DP subsystem instance
+ * @np: parent device node
+ *
+ * This function searches and returns a DP subsystem structure for
+ * the parent device node, @np. The DP subsystem node should be a child node of
+ * @np, with 'xlnx,dp-sub' property pointing to the DP device node. An instance
+ * can be shared by multiple users.
+ *
+ * Return: the corresponding DP subsystem structure if found, NULL if
+ * the device node doesn't have the 'xlnx,dp-sub' property, or an
+ * -EPROBE_DEFER error pointer if the DP subsystem isn't found. See the
+ * example node sketch after this function.
+ */
+struct xilinx_drm_dp_sub *xilinx_drm_dp_sub_of_get(struct device_node *np)
+{
+ struct device_node *xilinx_drm_dp_sub_node;
+ struct xilinx_drm_dp_sub *found = NULL;
+ struct xilinx_drm_dp_sub *dp_sub;
+
+ if (!of_find_property(np, "xlnx,dp-sub", NULL))
+ return NULL;
+
+ xilinx_drm_dp_sub_node = of_parse_phandle(np, "xlnx,dp-sub", 0);
+ if (!xilinx_drm_dp_sub_node)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&xilinx_drm_dp_sub_lock);
+ list_for_each_entry(dp_sub, &xilinx_drm_dp_sub_list, list) {
+ if (dp_sub->dev->of_node == xilinx_drm_dp_sub_node) {
+ found = dp_sub;
+ break;
+ }
+ }
+ mutex_unlock(&xilinx_drm_dp_sub_lock);
+
+ of_node_put(xilinx_drm_dp_sub_node);
+
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_of_get);
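+/*
+ * Example (hypothetical) consumer node referencing the DP subsystem via
+ * the 'xlnx,dp-sub' phandle; the node name, unit address and the 'dp_sub'
+ * label below are only illustrative:
+ *
+ *	dp: dp@a0000000 {
+ *		...
+ *		xlnx,dp-sub = <&dp_sub>;
+ *	};
+ */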
+
+/**
+ * xilinx_drm_dp_sub_put - Put the DP subsystem instance
+ * @dp_sub: DP subsystem
+ *
+ * Put the DP subsystem instance @dp_sub.
+ */
+void xilinx_drm_dp_sub_put(struct xilinx_drm_dp_sub *dp_sub)
+{
+ /* no-op */
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_dp_sub_put);
+
+/**
+ * xilinx_drm_dp_sub_register_device - Register the DP subsystem to the global list
+ * @dp_sub: DP subsystem
+ *
+ * Register the DP subsystem instance to the global list
+ */
+static void xilinx_drm_dp_sub_register_device(struct xilinx_drm_dp_sub *dp_sub)
+{
+ mutex_lock(&xilinx_drm_dp_sub_lock);
+ list_add_tail(&dp_sub->list, &xilinx_drm_dp_sub_list);
+ mutex_unlock(&xilinx_drm_dp_sub_lock);
+}
+
+/**
+ * xilinx_drm_dp_sub_unregister_device - Unregister the DP subsystem instance
+ * @dp_sub: DP subsystem
+ *
+ * Unregister the DP subsystem instance from the global list
+ */
+static void
+xilinx_drm_dp_sub_unregister_device(struct xilinx_drm_dp_sub *dp_sub)
+{
+ mutex_lock(&xilinx_drm_dp_sub_lock);
+ list_del(&dp_sub->list);
+ mutex_unlock(&xilinx_drm_dp_sub_lock);
+}
+
+/**
+ * xilinx_drm_dp_sub_parse_of - Parse the DP subsystem device tree node
+ * @dp_sub: DP subsystem
+ *
+ * Parse the DP subsystem device tree node.
+ *
+ * Return: 0 on success, or the corresponding error code.
+ */
+static int xilinx_drm_dp_sub_parse_of(struct xilinx_drm_dp_sub *dp_sub)
+{
+ struct device_node *node = dp_sub->dev->of_node;
+ struct xilinx_drm_dp_sub_layer *layer;
+ const char *string;
+ u32 fmt, i, size;
+ int ret;
+
+ ret = of_property_read_string(node, "xlnx,output-fmt", &string);
+ if (ret < 0) {
+		dev_err(dp_sub->dev, "No colorimetry in DT\n");
+ return ret;
+ }
+
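+	/*
+	 * Map the colorimetry string from DT to a blender output format;
+	 * e.g. 'xlnx,output-fmt = "ycrcb422";' selects YCbCr 4:2:2 output
+	 * with chroma downsampling enabled.
+	 */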
+ if (strcmp(string, "rgb") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB;
+ } else if (strcmp(string, "ycrcb444") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR444;
+ } else if (strcmp(string, "ycrcb422") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YCBCR422;
+ fmt |= XILINX_DP_SUB_V_BLEND_OUTPUT_EN_DOWNSAMPLE;
+ } else if (strcmp(string, "yonly") == 0) {
+ fmt = XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_YONLY;
+ } else {
+ dev_err(dp_sub->dev, "Invalid output format in DT\n");
+ return -EINVAL;
+ }
+
+ xilinx_drm_writel(dp_sub->blend.base,
+ XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT, fmt);
+
+ if (fmt != XILINX_DP_SUB_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+ u32 offset, i;
+
+		/* Hardcode SDTV coefficients. Could be made runtime configurable */
+ offset = XILINX_DP_SUB_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_COEFF; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ sdtv_coeffs[i]);
+
+ offset = XILINX_DP_SUB_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < XILINX_DP_SUB_V_BLEND_NUM_OFFSET; i++)
+ xilinx_drm_writel(dp_sub->blend.base, offset + i * 4,
+ full_range_offsets[i]);
+ }
+
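+	/* Exactly one layer acts as the primary plane: the video layer if
+	 * 'xlnx,vid-primary' is present in DT, the graphics layer otherwise.
+	 */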
+ if (of_property_read_bool(node, "xlnx,vid-primary"))
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID].primary = true;
+ else
+ dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX].primary = true;
+
+ ret = of_property_read_string(node, "xlnx,vid-fmt", &string);
+ if (!ret) {
+ layer = &dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_VID];
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kzalloc(dp_sub->dev,
+ sizeof(*layer->drm_fmts) * size,
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->num_fmts; i++) {
+ const struct xilinx_drm_dp_sub_fmt *fmt =
+ &av_buf_vid_fmts[i];
+
+ if (strcmp(string, fmt->name) == 0)
+ layer->fmt = fmt;
+
+ layer->drm_fmts[i] = fmt->drm_fmt;
+ }
+
+ if (!layer->fmt) {
+ dev_info(dp_sub->dev, "Invalid vid-fmt in DT\n");
+ layer->fmt = &av_buf_vid_fmts[0];
+ }
+ }
+
+ ret = of_property_read_string(node, "xlnx,gfx-fmt", &string);
+ if (!ret) {
+ layer = &dp_sub->layers[XILINX_DRM_DP_SUB_LAYER_GFX];
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kzalloc(dp_sub->dev,
+ sizeof(*layer->drm_fmts) * size,
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->num_fmts; i++) {
+ const struct xilinx_drm_dp_sub_fmt *fmt =
+ &av_buf_gfx_fmts[i];
+
+ if (strcmp(string, fmt->name) == 0)
+ layer->fmt = fmt;
+
+ layer->drm_fmts[i] = fmt->drm_fmt;
+ }
+
+ if (!layer->fmt) {
+			dev_info(dp_sub->dev, "Invalid gfx-fmt in DT\n");
+ layer->fmt = &av_buf_gfx_fmts[0];
+ }
+ }
+
+ dp_sub->vid_clk_pl = of_property_read_bool(node, "xlnx,vid-clk-pl");
+
+ return 0;
+}
+
+static int xilinx_drm_dp_sub_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp_sub *dp_sub;
+ struct resource *res;
+ int ret;
+
+ dp_sub = devm_kzalloc(&pdev->dev, sizeof(*dp_sub), GFP_KERNEL);
+ if (!dp_sub)
+ return -ENOMEM;
+
+ dp_sub->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+ dp_sub->blend.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp_sub->blend.base))
+ return PTR_ERR(dp_sub->blend.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+ dp_sub->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp_sub->av_buf.base))
+ return PTR_ERR(dp_sub->av_buf.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+ dp_sub->aud.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp_sub->aud.base))
+ return PTR_ERR(dp_sub->aud.base);
+
+ dp_sub->layers[0].id = XILINX_DRM_DP_SUB_LAYER_VID;
+ dp_sub->layers[0].offset = 0;
+ dp_sub->layers[0].avail = true;
+ dp_sub->layers[0].other = &dp_sub->layers[1];
+
+ dp_sub->layers[1].id = XILINX_DRM_DP_SUB_LAYER_GFX;
+ dp_sub->layers[1].offset = 4;
+ dp_sub->layers[1].avail = true;
+ dp_sub->layers[1].other = &dp_sub->layers[0];
+
+ ret = xilinx_drm_dp_sub_parse_of(dp_sub);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, dp_sub);
+
+ xilinx_drm_dp_sub_register_device(dp_sub);
+
+ xilinx_dp_sub_debugfs_init(dp_sub);
+
+	dev_info(dp_sub->dev, "Xilinx DisplayPort Subsystem probed\n");
+
+ return 0;
+}
+
+static int xilinx_drm_dp_sub_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_dp_sub *dp_sub = platform_get_drvdata(pdev);
+
+ xilinx_drm_dp_sub_unregister_device(dp_sub);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_drm_dp_sub_of_id_table[] = {
+ { .compatible = "xlnx,dp-sub" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_dp_sub_of_id_table);
+
+static struct platform_driver xilinx_drm_dp_sub_driver = {
+ .driver = {
+ .name = "xilinx-drm-dp-sub",
+ .of_match_table = xilinx_drm_dp_sub_of_id_table,
+ },
+ .probe = xilinx_drm_dp_sub_probe,
+ .remove = xilinx_drm_dp_sub_remove,
+};
+
+module_platform_driver(xilinx_drm_dp_sub_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h
new file mode 100644
index 000000000000..b86e74622541
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dp_sub.h
@@ -0,0 +1,69 @@
+/*
+ * DisplayPort subsystem header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2014 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_DP_SUB_H_
+#define _XILINX_DRM_DP_SUB_H_
+
+#define XILINX_DRM_DP_SUB_NUM_LAYERS 2
+#define XILINX_DRM_DP_SUB_MAX_WIDTH 4096
+#define XILINX_DRM_DP_SUB_MAX_ALPHA 255
+
+struct drm_device;
+struct xilinx_drm_dp_sub;
+struct xilinx_drm_dp_sub_layer;
+
+int xilinx_drm_dp_sub_layer_check_size(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 width, u32 height);
+int xilinx_drm_dp_sub_layer_set_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 drm_fmt);
+u32 xilinx_drm_dp_sub_layer_get_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+void xilinx_drm_dp_sub_layer_get_fmts(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer,
+ u32 **drm_fmts,
+ unsigned int *num_fmts);
+void xilinx_drm_dp_sub_layer_enable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+void xilinx_drm_dp_sub_layer_disable(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+struct xilinx_drm_dp_sub_layer *
+xilinx_drm_dp_sub_layer_get(struct xilinx_drm_dp_sub *dp_sub, bool priv);
+void xilinx_drm_dp_sub_layer_put(struct xilinx_drm_dp_sub *dp_sub,
+ struct xilinx_drm_dp_sub_layer *layer);
+
+int xilinx_drm_dp_sub_set_output_fmt(struct xilinx_drm_dp_sub *dp_sub,
+ u32 drm_fmt);
+void xilinx_drm_dp_sub_set_bg_color(struct xilinx_drm_dp_sub *dp_sub,
+ u32 c0, u32 c1, u32 c2);
+void xilinx_drm_dp_sub_set_alpha(struct xilinx_drm_dp_sub *dp_sub, u32 alpha);
+void
+xilinx_drm_dp_sub_enable_alpha(struct xilinx_drm_dp_sub *dp_sub, bool enable);
+
+void xilinx_drm_dp_sub_enable_vblank(struct xilinx_drm_dp_sub *dp_sub,
+ void (*vblank_fn)(void *),
+ void *vblank_data);
+void xilinx_drm_dp_sub_disable_vblank(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_handle_vblank(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_enable(struct xilinx_drm_dp_sub *dp_sub);
+void xilinx_drm_dp_sub_disable(struct xilinx_drm_dp_sub *dp_sub);
+
+struct xilinx_drm_dp_sub *xilinx_drm_dp_sub_of_get(struct device_node *np);
+void xilinx_drm_dp_sub_put(struct xilinx_drm_dp_sub *dp_sub);
+
+#endif /* _XILINX_DRM_DP_SUB_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_drv.c b/drivers/gpu/drm/xilinx/xilinx_drm_drv.c
new file mode 100644
index 000000000000..52706757ffe0
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_drv.c
@@ -0,0 +1,614 @@
+/*
+ * Xilinx DRM KMS support for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_connector.h"
+#include "xilinx_drm_crtc.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_gem.h"
+
+#define DRIVER_NAME "xilinx_drm"
+#define DRIVER_DESC "Xilinx DRM KMS support for Xilinx"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xilinx_drm_fbdev_vres = 2;
+module_param_named(fbdev_vres, xilinx_drm_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/*
+ * TODO: The possible pipeline configurations are numerous with Xilinx soft IPs.
+ * It's not too bad for now, but a more proper way (Common Display Framework,
+ * or some internal abstraction) should be considered when it reaches a point
+ * where such a thing is required.
+ */
+
+struct xilinx_drm_private {
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ struct drm_fb_helper *fb;
+ struct platform_device *pdev;
+ bool is_master;
+};
+
+/**
+ * struct xilinx_video_format_desc - Xilinx Video IP video format description
+ * @name: Xilinx video format name
+ * @depth: color depth
+ * @bpp: bits per pixel
+ * @xilinx_format: xilinx format code
+ * @drm_format: drm format code
+ */
+struct xilinx_video_format_desc {
+ const char *name;
+ unsigned int depth;
+ unsigned int bpp;
+ unsigned int xilinx_format;
+ u32 drm_format;
+};
+
+static const struct xilinx_video_format_desc xilinx_video_formats[] = {
+ { "yuv420", 16, 16, XILINX_VIDEO_FORMAT_YUV420, DRM_FORMAT_YUV420 },
+ { "uvy422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_UYVY },
+ { "vuy422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_VYUY },
+ { "yuv422", 16, 16, XILINX_VIDEO_FORMAT_YUV422, DRM_FORMAT_YUYV },
+ { "yvu422", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_YVYU },
+ { "yuv444", 24, 24, XILINX_VIDEO_FORMAT_YUV444, DRM_FORMAT_YUV444 },
+ { "nv12", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV12 },
+ { "nv21", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV21 },
+ { "nv16", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV16 },
+ { "nv61", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_NV61 },
+ { "abgr1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR1555 },
+ { "argb1555", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB1555 },
+ { "rgba4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA4444 },
+ { "bgra4444", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA4444 },
+ { "bgr565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGR565 },
+ { "rgb565", 16, 16, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGB565 },
+ { "bgr888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_BGR888 },
+ { "rgb888", 24, 24, XILINX_VIDEO_FORMAT_RGB, DRM_FORMAT_RGB888 },
+ { "xbgr8888", 24, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_XBGR8888 },
+ { "xrgb8888", 24, 32, XILINX_VIDEO_FORMAT_XRGB, DRM_FORMAT_XRGB8888 },
+ { "abgr8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ABGR8888 },
+ { "argb8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_ARGB8888 },
+ { "bgra8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_BGRA8888 },
+ { "rgba8888", 32, 32, XILINX_VIDEO_FORMAT_NONE, DRM_FORMAT_RGBA8888 },
+};
+
+/**
+ * xilinx_drm_check_format - Check if the given format is supported
+ * @drm: DRM device
+ * @fourcc: format fourcc
+ *
+ * Check if the given format @fourcc is supported by the current pipeline
+ *
+ * Return: true if the format is supported, or false
+ */
+bool xilinx_drm_check_format(struct drm_device *drm, u32 fourcc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_check_format(private->crtc, fourcc);
+}
+
+/**
+ * xilinx_drm_get_format - Get the current device format
+ * @drm: DRM device
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+u32 xilinx_drm_get_format(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_get_format(private->crtc);
+}
+
+/**
+ * xilinx_drm_get_align - Get the alignment value for pitch
+ * @drm: DRM object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+unsigned int xilinx_drm_get_align(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ return xilinx_drm_crtc_get_align(private->crtc);
+}
+
+/* poll changed handler */
+static void xilinx_drm_output_poll_changed(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_fb_hotplug_event(private->fb);
+}
+
+static const struct drm_mode_config_funcs xilinx_drm_mode_config_funcs = {
+ .fb_create = xilinx_drm_fb_create,
+ .output_poll_changed = xilinx_drm_output_poll_changed,
+};
+
+/* enable vblank */
+static int xilinx_drm_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_enable_vblank(private->crtc);
+
+ return 0;
+}
+
+/* disable vblank */
+static void xilinx_drm_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_disable_vblank(private->crtc);
+}
+
+/* initialize mode config */
+static void xilinx_drm_mode_config_init(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width =
+ xilinx_drm_crtc_get_max_width(private->crtc);
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.funcs = &xilinx_drm_mode_config_funcs;
+}
+
+/* convert xilinx format to drm format by code */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, u32 *drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->xilinx_format == xilinx_format) {
+ *drm_format = format->drm_format;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Unknown Xilinx video format: %d\n", xilinx_format);
+
+ return -EINVAL;
+}
+
+/* convert xilinx format to drm format by name */
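+/* e.g. xilinx_drm_format_by_name("yuv422", &fmt) sets fmt to DRM_FORMAT_YUYV */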
+int xilinx_drm_format_by_name(const char *name, u32 *drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (strcmp(format->name, name) == 0) {
+ *drm_format = format->drm_format;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Unknown Xilinx video format: %s\n", name);
+
+ return -EINVAL;
+}
+
+/* get bpp of given format */
+unsigned int xilinx_drm_format_bpp(u32 drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->bpp;
+ }
+
+ return 0;
+}
+
+/* get color depth of given format */
+unsigned int xilinx_drm_format_depth(u32 drm_format)
+{
+ const struct xilinx_video_format_desc *format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xilinx_video_formats); i++) {
+ format = &xilinx_video_formats[i];
+ if (format->drm_format == drm_format)
+ return format->depth;
+ }
+
+ return 0;
+}
+
+static int xilinx_drm_bind(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+
+ return component_bind_all(dev, drm);
+}
+
+static void xilinx_drm_unbind(struct device *dev)
+{
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops xilinx_drm_ops = {
+ .bind = xilinx_drm_bind,
+ .unbind = xilinx_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static int xilinx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xilinx_drm_private *private = dev->dev_private;
+
+	/* This is a hack to allow the root user to run as a master */
+ if (!(drm_is_primary_client(file) && !dev->master) &&
+ !file->is_master && capable(CAP_SYS_ADMIN)) {
+ file->is_master = 1;
+ private->is_master = true;
+ }
+
+ return 0;
+}
+
+static int xilinx_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file = filp->private_data;
+ struct drm_minor *minor = file->minor;
+ struct drm_device *drm = minor->dev;
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ if (private->is_master) {
+ private->is_master = false;
+ file->is_master = 0;
+ }
+
+ return drm_release(inode, filp);
+}
+
+/* restore the default mode when xilinx drm is released */
+static void xilinx_drm_lastclose(struct drm_device *drm)
+{
+ struct xilinx_drm_private *private = drm->dev_private;
+
+ xilinx_drm_crtc_restore(private->crtc);
+
+ xilinx_drm_fb_restore_mode(private->fb);
+}
+
+static const struct file_operations xilinx_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = xilinx_drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xilinx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_PRIME | DRIVER_LEGACY,
+ .open = xilinx_drm_open,
+ .lastclose = xilinx_drm_lastclose,
+
+ .enable_vblank = xilinx_drm_enable_vblank,
+ .disable_vblank = xilinx_drm_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xilinx_drm_gem_cma_dumb_create,
+
+ .fops = &xilinx_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#if defined(CONFIG_PM_SLEEP)
+/* suspend xilinx drm */
+static int xilinx_drm_pm_suspend(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_kms_helper_poll_disable(drm);
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector,
+ DRM_MODE_DPMS_SUSPEND);
+
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+/* resume xilinx drm */
+static int xilinx_drm_pm_resume(struct device *dev)
+{
+ struct xilinx_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+ struct drm_connector *connector;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ if (connector->funcs->dpms) {
+ int dpms = connector->dpms;
+
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->funcs->dpms(connector, dpms);
+ }
+ }
+ drm_modeset_unlock_all(drm);
+
+ drm_helper_resume_force_mode(drm);
+
+ drm_modeset_lock_all(drm);
+ drm_kms_helper_poll_enable(drm);
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops xilinx_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_drm_pm_suspend, xilinx_drm_pm_resume)
+};
+
+/* init xilinx drm platform */
+static int xilinx_drm_platform_probe(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private;
+ struct drm_device *drm;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ const struct drm_format_info *info;
+ struct device_node *encoder_node, *ep = NULL, *remote;
+ struct component_match *match = NULL;
+ unsigned int align, i = 0;
+ int ret;
+ u32 format;
+
+ drm = drm_dev_alloc(&xilinx_drm_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ private = devm_kzalloc(drm->dev, sizeof(*private), GFP_KERNEL);
+ if (!private) {
+ ret = -ENOMEM;
+ goto err_drm;
+ }
+
+ drm_mode_config_init(drm);
+
+ /* create a xilinx crtc */
+ private->crtc = xilinx_drm_crtc_create(drm);
+ if (IS_ERR(private->crtc)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx crtc\n");
+ ret = PTR_ERR(private->crtc);
+ goto err_config;
+ }
+
+ while ((encoder_node = of_parse_phandle(drm->dev->of_node,
+ "xlnx,encoder-slave", i))) {
+ encoder = xilinx_drm_encoder_create(drm, encoder_node);
+ of_node_put(encoder_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx encoder\n");
+ ret = PTR_ERR(encoder);
+ goto err_config;
+ }
+
+ connector = xilinx_drm_connector_create(drm, encoder, i);
+ if (IS_ERR(connector)) {
+ DRM_DEBUG_DRIVER("failed to create xilinx connector\n");
+ ret = PTR_ERR(connector);
+ goto err_config;
+ }
+
+ i++;
+ }
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(drm->dev->of_node, ep);
+ if (!ep)
+ break;
+
+ of_node_put(ep);
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(drm->dev, &match, compare_of, remote);
+ of_node_put(remote);
+ i++;
+ }
+
+	if (i == 0) {
+		DRM_ERROR("failed to get an encoder slave node\n");
+		ret = -ENODEV;
+		goto err_config;
+	}
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto err_master;
+ }
+
+ /* enable irq to enable vblank feature */
+ drm->irq_enabled = 1;
+
+ drm->dev_private = private;
+ private->drm = drm;
+ xilinx_drm_mode_config_init(drm);
+
+ format = xilinx_drm_crtc_get_format(private->crtc);
+ info = drm_format_info(format);
+ if (info && info->depth && info->cpp[0]) {
+ align = xilinx_drm_crtc_get_align(private->crtc);
+ private->fb = xilinx_drm_fb_init(drm, info->cpp[0] * 8, 1,
+ align, xilinx_drm_fbdev_vres);
+ if (IS_ERR(private->fb)) {
+ DRM_ERROR("failed to initialize drm fb\n");
+ private->fb = NULL;
+ }
+ } else {
+ dev_info(&pdev->dev, "fbdev is not initialized\n");
+ }
+
+ drm_kms_helper_poll_init(drm);
+
+ drm_helper_disable_unused_functions(drm);
+
+ platform_set_drvdata(pdev, private);
+
+ if (match) {
+ ret = component_master_add_with_match(drm->dev,
+ &xilinx_drm_ops, match);
+ if (ret)
+ goto err_master;
+ }
+
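+	/* Request a coherent DMA mask as wide as dma_addr_t (32 or 64 bits) */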
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(sizeof(dma_addr_t) * 8));
+ if (ret) {
+ dev_info(&pdev->dev, "failed to set coherent mask (%zu)\n",
+ sizeof(dma_addr_t));
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_master;
+
+ return 0;
+
+err_master:
+ component_master_del(drm->dev, &xilinx_drm_ops);
+err_config:
+ drm_mode_config_cleanup(drm);
+ if (ret == -EPROBE_DEFER)
+		DRM_INFO("load() is deferred & will be called again\n");
+err_drm:
+ drm_dev_put(drm);
+ return ret;
+}
+
+/* exit xilinx drm platform */
+static int xilinx_drm_platform_remove(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+ struct drm_device *drm = private->drm;
+
+ component_master_del(drm->dev, &xilinx_drm_ops);
+ drm_kms_helper_poll_fini(drm);
+ xilinx_drm_fb_fini(private->fb);
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+ drm_dev_put(private->drm);
+
+ return 0;
+}
+
+static void xilinx_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct xilinx_drm_private *private = platform_get_drvdata(pdev);
+
+ drm_put_dev(private->drm);
+}
+
+static const struct of_device_id xilinx_drm_of_match[] = {
+ { .compatible = "xlnx,drm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_drm_of_match);
+
+static struct platform_driver xilinx_drm_private_driver = {
+ .probe = xilinx_drm_platform_probe,
+ .remove = xilinx_drm_platform_remove,
+ .shutdown = xilinx_drm_platform_shutdown,
+ .driver = {
+ .name = "xilinx-drm",
+ .pm = &xilinx_drm_pm_ops,
+ .of_match_table = xilinx_drm_of_match,
+ },
+};
+
+module_platform_driver(xilinx_drm_private_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_drv.h b/drivers/gpu/drm/xilinx/xilinx_drm_drv.h
new file mode 100644
index 000000000000..b871d421df84
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_drv.h
@@ -0,0 +1,65 @@
+/*
+ * Xilinx DRM KMS Header for Xilinx
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_H_
+#define _XILINX_DRM_H_
+
+enum xilinx_video_format {
+ XILINX_VIDEO_FORMAT_YUV422 = 0,
+ XILINX_VIDEO_FORMAT_YUV444 = 1,
+ XILINX_VIDEO_FORMAT_RGB = 2,
+ XILINX_VIDEO_FORMAT_YUV420 = 3,
+ XILINX_VIDEO_FORMAT_XRGB = 16,
+ XILINX_VIDEO_FORMAT_NONE = 32,
+};
+
+/* convert the xilinx format to the drm format */
+int xilinx_drm_format_by_code(unsigned int xilinx_format, u32 *drm_format);
+int xilinx_drm_format_by_name(const char *name, u32 *drm_format);
+
+unsigned int xilinx_drm_format_bpp(u32 drm_format);
+unsigned int xilinx_drm_format_depth(u32 drm_format);
+
+/* io write operations */
+static inline void xilinx_drm_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+/* io read operations */
+static inline u32 xilinx_drm_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
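+/* clear the given bits of a register */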
+static inline void xilinx_drm_clr(void __iomem *base, int offset, u32 clr)
+{
+ xilinx_drm_writel(base, offset, xilinx_drm_readl(base, offset) & ~clr);
+}
+
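+/* set the given bits of a register */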
+static inline void xilinx_drm_set(void __iomem *base, int offset, u32 set)
+{
+ xilinx_drm_writel(base, offset, xilinx_drm_readl(base, offset) | set);
+}
+
+struct drm_device;
+
+bool xilinx_drm_check_format(struct drm_device *drm, uint32_t fourcc);
+uint32_t xilinx_drm_get_format(struct drm_device *drm);
+unsigned int xilinx_drm_get_align(struct drm_device *drm);
+
+#endif /* _XILINX_DRM_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c b/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c
new file mode 100644
index 000000000000..b168ee26a6fe
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_dsi.c
@@ -0,0 +1,808 @@
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* DSI Tx IP registers */
+#define XDSI_CCR 0x00
+#define XDSI_CCR_COREENB BIT(0)
+#define XDSI_CCR_CRREADY BIT(2)
+#define XDSI_PCR 0x04
+#define XDSI_PCR_VIDEOMODE(x) (((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK (0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT 3
+#define XDSI_PCR_BLLPTYPE(x) ((x) << 5)
+#define XDSI_PCR_BLLPMODE(x) ((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x) ((x) << 13)
+#define XDSI_GIER 0x20
+#define XDSI_ISR 0x24
+#define XDSI_IER 0x28
+#define XDSI_CMD 0x30
+#define XDSI_CMD_QUEUE_PACKET(x) (((x) & 0xffffff) << 0)
+#define XDSI_TIME1 0x50
+#define XDSI_TIME1_BLLP_BURST(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME1_HSA(x) (((x) & 0xffff) << 16)
+#define XDSI_TIME2 0x54
+#define XDSI_TIME2_VACT(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME2_HACT(x) (((x) & 0xffff) << 16)
+#define XDSI_HACT_MULTIPLIER GENMASK(1, 0)
+#define XDSI_TIME3 0x58
+#define XDSI_TIME3_HFP(x) (((x) & 0xffff) << 0)
+#define XDSI_TIME3_HBP(x) (((x) & 0xffff) << 16)
+#define XDSI_TIME4 0x5c
+#define XDSI_TIME4_VFP(x) (((x) & 0xff) << 0)
+#define XDSI_TIME4_VBP(x) (((x) & 0xff) << 8)
+#define XDSI_TIME4_VSA(x) (((x) & 0xff) << 16)
+#define XDSI_LTIME 0x60
+#define XDSI_BLLP_TIME 0x64
+#define XDSI_NUM_DATA_TYPES 4
+#define XDSI_VIDEO_MODE_SYNC_PULSE 0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT 0x1
+#define XDSI_VIDEO_MODE_BURST 0x2
+
+/*
+ * Used as a multiplication factor for HACT based on the
+ * DSI data type in use.
+ *
+ * e.g. for RGB666_L datatype and 1920x1080 resolution,
+ * the Hact (WC) would be as follows -
+ * 1920 pixels * 18 bits per pixel / 8 bits per byte
+ * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+ *
+ * Data Type - Multiplication factor
+ * RGB888 - 3
+ * RGB666_L - 2.25
+ * RGB666_P - 2.25
+ * RGB565 - 2
+ *
+ * Since the multiplication factor may be a floating-point number,
+ * a 100x scaled factor is used.
+ *
+ * XDSI_NUM_DATA_TYPES represents number of data types in the
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of DRM framework.
+ */
+static const int xdsi_mul_factor[XDSI_NUM_DATA_TYPES] = {300, 225, 225, 200};
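+/* e.g. RGB565 at 1920 active pixels: WC = 1920 * 200 / 100 = 3840 bytes */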
+
+/*
+ * struct xilinx_dsi - Core configuration DSI Tx subsystem device structure
+ * @drm_encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ */
+struct xilinx_dsi {
+ struct drm_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+ void __iomem *iomem;
+ u32 lanes;
+ u32 mode_flags;
+ enum mipi_dsi_pixel_format format;
+ struct videomode vm;
+ u32 mul_factor;
+ struct drm_property *eotp_prop;
+ struct drm_property *bllp_mode_prop;
+ struct drm_property *bllp_type_prop;
+ struct drm_property *video_mode_prop;
+ struct drm_property *bllp_burst_time_prop;
+ struct drm_property *cmd_queue_prop;
+ bool eotp_prop_val;
+ bool bllp_mode_prop_val;
+ bool bllp_type_prop_val;
+ u32 video_mode_prop_val;
+ u32 bllp_burst_time_prop_val;
+ u32 cmd_queue_prop_val;
+};
+
+#define host_to_dsi(host) container_of(host, struct xilinx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xilinx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xilinx_dsi, encoder)
+
+static inline void xilinx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_dsi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xilinx_dsi_set_default_drm_properties - Configure DSI DRM
+ * properties with their default values
+ * @dsi: DSI structure having the updated user parameters
+ */
+static void
+xilinx_dsi_set_default_drm_properties(struct xilinx_dsi *dsi)
+{
+ drm_object_property_set_value(&dsi->connector.base, dsi->eotp_prop, 1);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_mode_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_type_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->video_mode_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->bllp_burst_time_prop, 0);
+ drm_object_property_set_value(&dsi->connector.base,
+ dsi->cmd_queue_prop, 0);
+}
+
+/**
+ * xilinx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * given from user application.
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure with the drm_property parameters
+ * configured from the user application and writes them into the DSI IP
+ * registers.
+ */
+static void xilinx_dsi_set_config_parameters(struct xilinx_dsi *dsi)
+{
+ u32 reg = 0;
+
+ reg |= XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+ reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+ reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+ reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+
+ /* Configure the burst time if video mode is burst.
+ * HSA of TIME1 register is ignored in this mode.
+ */
+ if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+ reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+ xilinx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+	dev_dbg(dsi->dev, "PCR register value = 0x%x\n",
+ xilinx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xilinx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xilinx_dsi_set_display_mode(struct xilinx_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg, video_mode;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_PCR);
+ video_mode = ((reg & XDSI_PCR_VIDEOMODE_MASK) >>
+ XDSI_PCR_VIDEOMODE_SHIFT);
+
+	/* configure the HSA value only in non-burst sync pulse video mode */
+ if ((!video_mode) &&
+ (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+ reg = XDSI_TIME1_HSA(vm->hsync_len);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+ XDSI_TIME4_VBP(vm->vback_porch) |
+ XDSI_TIME4_VSA(vm->vsync_len);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+ reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+ XDSI_TIME3_HBP(vm->hback_porch);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+	dev_dbg(dsi->dev, "mul factor for parsed datatype = %d\n",
+ (dsi->mul_factor) / 100);
+
+	/* The HACT parameter received from panel timing values should be
+	 * divisible by 4. This is because the word count given as input to
+	 * the DSI controller is HACT * mul_factor. The mul_factor is 3,
+	 * 2.25, 2.25 and 2 for RGB888, RGB666_L, RGB666_P and RGB565
+	 * respectively.
+	 * e.g. for the RGB666_L color format and 1080p, the word count is
+	 * 1920 * 2.25 = 4320, which is divisible by 4 and is a valid input
+	 * to the DSI controller. Based on this 2.25 mul factor, we come up
+	 * with the division factor (XDSI_HACT_MULTIPLIER) of 4 for checking.
+	 */
+ if (((vm->hactive) & XDSI_HACT_MULTIPLIER) != 0)
+ dev_alert(dsi->dev, "Incorrect HACT will be programmed\n");
+
+ reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+ XDSI_TIME2_VACT(vm->vactive);
+ xilinx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xilinx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and enables the core enable bit
+ * of the core configuration register.
+ */
+static void xilinx_dsi_set_display_enable(struct xilinx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg |= XDSI_CCR_COREENB;
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xilinx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and disables the core enable bit
+ * of the core configuration register.
+ */
+static void xilinx_dsi_set_display_disable(struct xilinx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xilinx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg &= ~XDSI_CCR_COREENB;
+
+ xilinx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+static void xilinx_dsi_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "encoder dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ xilinx_dsi_set_display_enable(dsi);
+ break;
+ default:
+ xilinx_dsi_set_display_disable(dsi);
+ xilinx_dsi_set_default_drm_properties(dsi);
+ break;
+ }
+}
+
+/**
+ * xilinx_dsi_connector_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @base_connector: pointer to the Xilinx DSI connector
+ * @property: pointer to the drm_property structure
+ * @value: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from the user
+ * application and updates the DSI structure property variables with the
+ * values. These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success, or -EINVAL if setting the property fails
+ */
+static int
+xilinx_dsi_connector_set_property(struct drm_connector *base_connector,
+ struct drm_property *property,
+ u64 value)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+
+ dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+ property->name, value);
+
+ if (property == dsi->eotp_prop)
+ dsi->eotp_prop_val = !!value;
+ else if (property == dsi->bllp_mode_prop)
+ dsi->bllp_mode_prop_val = !!value;
+ else if (property == dsi->bllp_type_prop)
+ dsi->bllp_type_prop_val = !!value;
+ else if (property == dsi->video_mode_prop)
+ dsi->video_mode_prop_val = (unsigned int)value;
+ else if (property == dsi->bllp_burst_time_prop)
+ dsi->bllp_burst_time_prop_val = (unsigned int)value;
+ else if (property == dsi->cmd_queue_prop)
+ dsi->cmd_queue_prop_val = (unsigned int)value;
+ else
+ return -EINVAL;
+
+ xilinx_dsi_set_config_parameters(dsi);
+
+ return 0;
+}
+
+static int xilinx_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ u32 panel_lanes;
+ struct xilinx_dsi *dsi = host_to_dsi(host);
+
+ panel_lanes = device->lanes;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (panel_lanes != dsi->lanes) {
+ dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+ panel_lanes, dsi->lanes);
+ return -EINVAL;
+ }
+
+ if ((dsi->lanes > 4) || (dsi->lanes < 1)) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (device->format != dsi->format) {
+ dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+ device->format, dsi->format);
+ return -EINVAL;
+ }
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int xilinx_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct xilinx_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel_node = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops xilinx_dsi_ops = {
+ .attach = xilinx_dsi_host_attach,
+ .detach = xilinx_dsi_host_detach,
+};
+
+static int xilinx_dsi_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+ int ret;
+
+ dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ drm_panel_unprepare(dsi->panel);
+ dev_err(dsi->dev, "DRM panel not enabled. power off DSI\n");
+ return ret;
+ }
+ break;
+ default:
+ drm_panel_disable(dsi->panel);
+ drm_panel_unprepare(dsi->panel);
+ break;
+ }
+
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xilinx_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel)
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ } else if (!dsi->panel_node) {
+ xilinx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void xilinx_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xilinx_dsi_connector_funcs = {
+ .dpms = xilinx_dsi_connector_dpms,
+ .detect = xilinx_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xilinx_dsi_connector_destroy,
+ .set_property = xilinx_dsi_connector_set_property,
+};
+
+static int xilinx_dsi_get_modes(struct drm_connector *connector)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+xilinx_dsi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xilinx_dsi_connector_helper_funcs = {
+ .get_modes = xilinx_dsi_get_modes,
+ .best_encoder = xilinx_dsi_best_encoder,
+};
+
+/**
+ * xilinx_drm_dsi_connector_create_property - create DSI connector properties
+ *
+ * @base_connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the Xilinx DSI connector component and creates
+ * its drm_property objects with their default values.
+ */
+static void
+xilinx_drm_dsi_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+
+ dsi->eotp_prop = drm_property_create_bool(dev, 1, "eotp");
+ dsi->video_mode_prop = drm_property_create_range(dev, 0,
+ "video_mode", 0, 2);
+ dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+ dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+ dsi->bllp_burst_time_prop = drm_property_create_range(dev, 0,
+ "bllp_burst_time", 0, 0xFFFF);
+ dsi->cmd_queue_prop = drm_property_create_range(dev, 0,
+ "cmd_queue", 0, 0xFFFFFF);
+}
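
For reference, these connector properties are driven from userspace. A
minimal libdrm sketch (the /dev/dri/card0 path and the connector id are
assumptions; property ids must always be looked up by name at runtime):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR); /* assumed node */
		uint32_t conn_id = 35;	/* hypothetical connector id */
		drmModeObjectPropertiesPtr props;
		uint32_t i;

		if (fd < 0)
			return 1;

		props = drmModeObjectGetProperties(fd, conn_id,
						   DRM_MODE_OBJECT_CONNECTOR);
		for (i = 0; props && i < props->count_props; i++) {
			drmModePropertyPtr p =
				drmModeGetProperty(fd, props->props[i]);

			/* pick a value from the 0-2 "video_mode" range above;
			 * the mode semantics are hardware-specific */
			if (p && !strcmp(p->name, "video_mode"))
				drmModeObjectSetProperty(fd, conn_id,
						DRM_MODE_OBJECT_CONNECTOR,
						p->prop_id, 2);
			drmModeFreeProperty(p);
		}
		drmModeFreeObjectProperties(props);
		return 0;
	}
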
+
+/**
+ * xilinx_drm_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx DSI connector
+ */
+static void
+xilinx_drm_dsi_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xilinx_dsi *dsi = connector_to_dsi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (dsi->eotp_prop)
+ drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+ if (dsi->video_mode_prop)
+ drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+	if (dsi->bllp_burst_time_prop)
+		drm_object_attach_property(obj, dsi->bllp_burst_time_prop, 0);
+
+	if (dsi->bllp_mode_prop)
+		drm_object_attach_property(obj, dsi->bllp_mode_prop, 0);
+
+	if (dsi->bllp_type_prop)
+		drm_object_attach_property(obj, dsi->bllp_type_prop, 0);
+
+	if (dsi->cmd_queue_prop)
+		drm_object_attach_property(obj, dsi->cmd_queue_prop, 0);
+}
+
+static int xilinx_dsi_create_connector(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xilinx_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xilinx_dsi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xilinx_drm_dsi_connector_create_property(connector);
+ xilinx_drm_dsi_connector_attach_property(connector);
+
+ return 0;
+}
+
+static bool xilinx_dsi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * xilinx_dsi_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @mode: DRM kernel-internal display mode structure
+ * @adjusted_mode: DSI panel timing parameters
+ *
+ * This function derives the DSI IP timing parameters from the timing
+ * values given in the attached panel driver.
+ */
+static void xilinx_dsi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = adjusted_mode;
+
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
+ xilinx_dsi_set_display_mode(dsi);
+}
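
To make the porch arithmetic above concrete, here is a standalone sketch
using hypothetical CEA-861 1920x1080@60 timings (example values, not taken
from this patch):

	#include <stdio.h>

	int main(void)
	{
		/* 1080p60: hdisplay 1920, htotal 2200, vdisplay 1080,
		 * vtotal 1125 */
		int hdisplay = 1920, hsync_start = 2008;
		int hsync_end = 2052, htotal = 2200;
		int vdisplay = 1080, vsync_start = 1084;
		int vsync_end = 1089, vtotal = 1125;

		printf("hfront %d hsync %d hback %d\n",
		       hsync_start - hdisplay,	/* 88 */
		       hsync_end - hsync_start,	/* 44 */
		       htotal - hsync_end);	/* 148 */
		printf("vfront %d vsync %d vback %d\n",
		       vsync_start - vdisplay,	/* 4 */
		       vsync_end - vsync_start,	/* 5 */
		       vtotal - vsync_end);	/* 36 */
		return 0;
	}
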
+
+static void xilinx_dsi_prepare(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "%s %d\n", __func__, __LINE__);
+ xilinx_dsi_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void xilinx_dsi_commit(struct drm_encoder *encoder)
+{
+ struct xilinx_dsi *dsi = encoder_to_dsi(encoder);
+
+ dev_dbg(dsi->dev, "config and enable the DSI: %s %d\n",
+ __func__, __LINE__);
+
+ xilinx_dsi_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs xilinx_dsi_encoder_helper_funcs = {
+ .dpms = xilinx_dsi_encoder_dpms,
+ .mode_fixup = xilinx_dsi_mode_fixup,
+ .mode_set = xilinx_dsi_mode_set,
+ .prepare = xilinx_dsi_prepare,
+ .commit = xilinx_dsi_commit,
+};
+
+static const struct drm_encoder_funcs xilinx_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xilinx_dsi_parse_dt(struct xilinx_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 datatype;
+
+ ret = of_property_read_u32(node, "xlnx,dsi-num-lanes",
+ &dsi->lanes);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+ return ret;
+ }
+
+ if ((dsi->lanes > 4) || (dsi->lanes < 1)) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+ return ret;
+ }
+
+ dsi->format = datatype;
+
+ if (datatype > MIPI_DSI_FMT_RGB565) {
+ dev_err(dsi->dev, "Invalid xlnx,dsi-data-type string\n");
+ return -EINVAL;
+ }
+
+ dsi->mul_factor = xdsi_mul_factor[datatype];
+
+ dev_dbg(dsi->dev, "DSI controller num lanes = %d", dsi->lanes);
+
+ dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+
+ return 0;
+}
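
For illustration, a device tree node consistent with the two properties
parsed above might look like the following (a sketch only: the node name,
unit address, reg values, lane count, and data type are assumptions, not
taken from this patch):

	dsi@80000000 {
		compatible = "xlnx,mipi-dsi-tx-subsystem";
		reg = <0x80000000 0x10000>;
		xlnx,dsi-num-lanes = <4>;
		xlnx,dsi-data-type = <0>;	/* MIPI_DSI_FMT_RGB888 */
	};
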
+
+static int xilinx_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_dsi *dsi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+	/*
+	 * TODO: Only one CRTC is possible with the current DSI Tx driver
+	 * implementation. The DRM framework supports multiple CRTCs, and
+	 * the driver can be extended accordingly.
+	 */
+ encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(drm_dev, encoder, &xilinx_dsi_encoder_funcs,
+			       DRM_MODE_ENCODER_DSI, NULL);
+	if (ret) {
+		dev_err(dsi->dev, "failed to initialize encoder\n");
+		return ret;
+	}
+
+ drm_encoder_helper_add(encoder, &xilinx_dsi_encoder_helper_funcs);
+
+ ret = xilinx_dsi_create_connector(encoder);
+ if (ret) {
+ dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ xilinx_dsi_connector_destroy(&dsi->connector);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void xilinx_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_dsi *dsi = dev_get_drvdata(dev);
+
+ xilinx_dsi_encoder_dpms(&dsi->encoder, DRM_MODE_DPMS_OFF);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+}
+
+static const struct component_ops xilinx_dsi_component_ops = {
+ .bind = xilinx_dsi_bind,
+ .unbind = xilinx_dsi_unbind,
+};
+
+static int xilinx_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dsi_host.ops = &xilinx_dsi_ops;
+ dsi->dsi_host.dev = dev;
+ dsi->dev = dev;
+
+ ret = xilinx_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->iomem = devm_ioremap_resource(dev, res);
+	if (IS_ERR(dsi->iomem)) {
+		dev_err(dev, "failed to remap io region\n");
+		return PTR_ERR(dsi->iomem);
+	}
+
+	dev_dbg(dsi->dev, "dsi virtual address = %p %s %d\n",
+		dsi->iomem, __func__, __LINE__);
+
+ platform_set_drvdata(pdev, dsi);
+
+ return component_add(dev, &xilinx_dsi_component_ops);
+}
+
+static int xilinx_dsi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &xilinx_dsi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dsi_of_match[] = {
+ { .compatible = "xlnx,mipi-dsi-tx-subsystem"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = xilinx_dsi_probe,
+ .remove = xilinx_dsi_remove,
+ .driver = {
+ .name = "xilinx-mipi-dsi",
+ .owner = THIS_MODULE,
+ .of_match_table = xilinx_dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c
new file mode 100644
index 000000000000..ca3f9f112162
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.c
@@ -0,0 +1,240 @@
+/*
+ * Xilinx DRM encoder driver
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_encoder.h"
+
+struct xilinx_drm_encoder {
+ struct drm_encoder_slave slave;
+ struct device *dev;
+ int dpms;
+};
+
+#define to_xilinx_encoder(x) \
+ container_of(x, struct xilinx_drm_encoder, slave)
+
+/* set encoder dpms */
+static void xilinx_drm_encoder_dpms(struct drm_encoder *base_encoder, int dpms)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ encoder = to_xilinx_encoder(encoder_slave);
+
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", encoder->dpms, dpms);
+
+ if (encoder->dpms == dpms)
+ return;
+
+ encoder->dpms = dpms;
+ if (encoder_sfuncs->dpms)
+ encoder_sfuncs->dpms(base_encoder, dpms);
+}
+
+/* adjust a mode if needed */
+static bool
+xilinx_drm_encoder_mode_fixup(struct drm_encoder *base_encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs = NULL;
+ bool ret = true;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ if (encoder_sfuncs->mode_fixup)
+ ret = encoder_sfuncs->mode_fixup(base_encoder, mode,
+ adjusted_mode);
+
+ return ret;
+}
+
+/* set mode to xilinx encoder */
+static void xilinx_drm_encoder_mode_set(struct drm_encoder *base_encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_encoder_slave *encoder_slave;
+ const struct drm_encoder_slave_funcs *encoder_sfuncs;
+
+ DRM_DEBUG_KMS("h: %d, v: %d\n",
+ adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+ DRM_DEBUG_KMS("refresh: %d, pclock: %d khz\n",
+ adjusted_mode->vrefresh, adjusted_mode->clock);
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder_sfuncs = encoder_slave->slave_funcs;
+ if (encoder_sfuncs->mode_set)
+ encoder_sfuncs->mode_set(base_encoder, mode, adjusted_mode);
+}
+
+/* apply mode to encoder pipe */
+static void xilinx_drm_encoder_commit(struct drm_encoder *base_encoder)
+{
+ /* start encoder with new mode */
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_ON);
+}
+
+/* prepare encoder */
+static void xilinx_drm_encoder_prepare(struct drm_encoder *base_encoder)
+{
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_OFF);
+}
+
+/* get crtc */
+static struct drm_crtc *
+xilinx_drm_encoder_get_crtc(struct drm_encoder *base_encoder)
+{
+ return base_encoder->crtc;
+}
+
+static const struct drm_encoder_helper_funcs xilinx_drm_encoder_helper_funcs = {
+ .dpms = xilinx_drm_encoder_dpms,
+ .mode_fixup = xilinx_drm_encoder_mode_fixup,
+ .mode_set = xilinx_drm_encoder_mode_set,
+ .prepare = xilinx_drm_encoder_prepare,
+ .commit = xilinx_drm_encoder_commit,
+ .get_crtc = xilinx_drm_encoder_get_crtc,
+};
+
+/* destroy encoder */
+void xilinx_drm_encoder_destroy(struct drm_encoder *base_encoder)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct drm_encoder_slave *encoder_slave;
+
+ encoder_slave = to_encoder_slave(base_encoder);
+ encoder = to_xilinx_encoder(encoder_slave);
+
+ /* make sure encoder is off */
+ xilinx_drm_encoder_dpms(base_encoder, DRM_MODE_DPMS_OFF);
+
+ drm_encoder_cleanup(base_encoder);
+ put_device(encoder->dev);
+}
+
+static const struct drm_encoder_funcs xilinx_drm_encoder_funcs = {
+ .destroy = xilinx_drm_encoder_destroy,
+};
+
+/* create encoder */
+struct drm_encoder *xilinx_drm_encoder_create(struct drm_device *drm,
+ struct device_node *node)
+{
+ struct xilinx_drm_encoder *encoder;
+ struct i2c_client *i2c_slv;
+ struct i2c_driver *i2c_driver;
+ struct drm_i2c_encoder_driver *drm_i2c_driver;
+ struct device_driver *device_driver;
+ struct platform_device *platform_slv;
+ struct platform_driver *platform_driver;
+ struct drm_platform_encoder_driver *drm_platform_driver;
+ int ret = 0;
+
+ encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return ERR_PTR(-ENOMEM);
+
+ encoder->dpms = DRM_MODE_DPMS_OFF;
+
+ /* FIXME: Use DT to figure out crtcs / clones */
+ encoder->slave.base.possible_crtcs = 1;
+ encoder->slave.base.possible_clones = ~0;
+ ret = drm_encoder_init(drm, &encoder->slave.base,
+ &xilinx_drm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize drm encoder\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(&encoder->slave.base,
+ &xilinx_drm_encoder_helper_funcs);
+
+ /* initialize slave encoder */
+ i2c_slv = of_find_i2c_device_by_node(node);
+ if (i2c_slv && i2c_slv->dev.driver) {
+ i2c_driver = to_i2c_driver(i2c_slv->dev.driver);
+ drm_i2c_driver = to_drm_i2c_encoder_driver(i2c_driver);
+ if (!drm_i2c_driver || !drm_i2c_driver->encoder_init) {
+ DRM_DEBUG_KMS("failed to initialize i2c slave\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ encoder->dev = &i2c_slv->dev;
+ ret = drm_i2c_driver->encoder_init(i2c_slv, drm,
+ &encoder->slave);
+ } else {
+ platform_slv = of_find_device_by_node(node);
+ if (!platform_slv) {
+ DRM_DEBUG_KMS("failed to get an encoder slv\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ device_driver = platform_slv->dev.driver;
+ if (!device_driver) {
+ DRM_DEBUG_KMS("failed to get device driver\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ platform_driver = to_platform_driver(device_driver);
+ drm_platform_driver =
+ to_drm_platform_encoder_driver(platform_driver);
+ if (!drm_platform_driver ||
+ !drm_platform_driver->encoder_init) {
+ DRM_DEBUG_KMS("failed to initialize platform slave\n");
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+
+ encoder->dev = &platform_slv->dev;
+ ret = drm_platform_driver->encoder_init(platform_slv, drm,
+ &encoder->slave);
+ }
+
+ if (ret) {
+ DRM_ERROR("failed to initialize encoder slave\n");
+ goto err_out;
+ }
+
+ if (!encoder->slave.slave_funcs) {
+ DRM_ERROR("there's no encoder slave function\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ return &encoder->slave.base;
+
+err_out:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h
new file mode 100644
index 000000000000..7707f14db499
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_encoder.h
@@ -0,0 +1,28 @@
+/*
+ * Xilinx DRM encoder header
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_ENCODER_H_
+#define _XILINX_DRM_ENCODER_H_
+
+struct drm_device;
+struct drm_encoder;
+
+struct drm_encoder *xilinx_drm_encoder_create(struct drm_device *drm,
+ struct device_node *node);
+void xilinx_drm_encoder_destroy(struct drm_encoder *base_encoder);
+
+#endif /* _XILINX_DRM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_fb.c b/drivers/gpu/drm/xilinx/xilinx_drm_fb.c
new file mode 100644
index 000000000000..e9fe1daaaec2
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_fb.c
@@ -0,0 +1,516 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+
+struct xilinx_drm_fb {
+ struct drm_framebuffer base;
+ struct drm_gem_cma_object *obj[4];
+};
+
+struct xilinx_drm_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct xilinx_drm_fb *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+};
+
+static inline struct xilinx_drm_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xilinx_drm_fbdev, fb_helper);
+}
+
+static inline struct xilinx_drm_fb *to_fb(struct drm_framebuffer *base_fb)
+{
+ return container_of(base_fb, struct xilinx_drm_fb, base);
+}
+
+static void xilinx_drm_fb_destroy(struct drm_framebuffer *base_fb)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (fb->obj[i])
+ drm_gem_object_put_unlocked(&fb->obj[i]->base);
+
+ drm_framebuffer_cleanup(base_fb);
+ kfree(fb);
+}
+
+static int xilinx_drm_fb_create_handle(struct drm_framebuffer *base_fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ return drm_gem_handle_create(file_priv, &fb->obj[0]->base, handle);
+}
+
+static const struct drm_framebuffer_funcs xilinx_drm_fb_funcs = {
+ .destroy = xilinx_drm_fb_destroy,
+ .create_handle = xilinx_drm_fb_create_handle,
+};
+
+/**
+ * xilinx_drm_fb_alloc - Allocate a xilinx_drm_fb
+ * @drm: DRM object
+ * @mode_cmd: drm_mode_fb_cmd2 struct
+ * @obj: pointers for returned drm_gem_cma_objects
+ * @num_planes: number of planes to be allocated
+ *
+ * This function is based on drm_fb_cma_alloc().
+ *
+ * Return: a xilinx_drm_fb object, or ERR_PTR.
+ */
+static struct xilinx_drm_fb *
+xilinx_drm_fb_alloc(struct drm_device *drm,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_cma_object **obj, unsigned int num_planes)
+{
+ struct xilinx_drm_fb *fb;
+ int ret;
+ int i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(drm, &fb->base, &xilinx_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("Failed to initialize framebuffer: %d\n", ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * xilinx_drm_fb_get_gem_obj - Get CMA GEM object for framebuffer
+ * @base_fb: the framebuffer
+ * @plane: which plane
+ *
+ * This function is based on drm_fb_cma_get_gem_obj().
+ *
+ * Return: a CMA GEM object for given framebuffer, or NULL if not available.
+ */
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane)
+{
+ struct xilinx_drm_fb *fb = to_fb(base_fb);
+
+ if (plane >= 4)
+ return NULL;
+
+ return fb->obj[plane];
+}
+
+static int xilinx_drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_set *modeset;
+ int ret = 0;
+ int i;
+
+ if (oops_in_progress)
+ return -EBUSY;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ modeset = &fb_helper->crtc_info[i].mode_set;
+
+ modeset->x = var->xoffset;
+ modeset->y = var->yoffset;
+
+ if (modeset->num_connectors) {
+ ret = drm_mode_set_config_internal(modeset);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ }
+ }
+ }
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+
+static int
+xilinx_drm_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ unsigned int i;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set;
+ struct drm_crtc *crtc;
+
+ mode_set = &fb_helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static struct fb_ops xilinx_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = xilinx_drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xilinx_drm_fb_ioctl,
+};
+
+/**
+ * xilinx_drm_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @sizes: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xilinx_drm_fbdev_create(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct xilinx_drm_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *base_fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel,
+ fbdev->align);
+ mode_cmd.pixel_format = xilinx_drm_get_format(drm);
+
+ mode_cmd.height *= fbdev->vres_mult;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = drm_gem_cma_create(drm, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ DRM_ERROR("Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ fbdev->fb = xilinx_drm_fb_alloc(drm, &mode_cmd, &obj, 1);
+ if (IS_ERR(fbdev->fb)) {
+ DRM_ERROR("Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ base_fb = &fbdev->fb->base;
+ fb_helper->fb = base_fb;
+ fb_helper->fbdev = fbi;
+
+ fbi->par = fb_helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xilinx_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("Failed to allocate color map.\n");
+ goto err_xilinx_drm_fb_destroy;
+ }
+
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
+ fbi->var.yres = base_fb->height / fbdev->vres_mult;
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * base_fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = size;
+ fbi->fix.smem_len = size;
+
+ return 0;
+
+err_xilinx_drm_fb_destroy:
+ drm_framebuffer_unregister_private(base_fb);
+ xilinx_drm_fb_destroy(base_fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
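
The buffer geometry above reduces to bytes_per_pixel = DIV_ROUND_UP(bpp, 8),
pitch = ALIGN(width * bytes_per_pixel, align), and size = pitch * height,
with the height pre-multiplied by vres_mult. A runnable sketch with made-up
numbers:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* hypothetical 1280x720 XRGB8888 surface, 8-byte pitch
		 * alignment, vres_mult of 2 for a double-height fbdev */
		unsigned int width = 1280, height = 720, bpp = 32;
		unsigned int align = 8, vres_mult = 2;
		unsigned int cpp = DIV_ROUND_UP(bpp, 8);	/* 4 */
		unsigned int pitch = ALIGN(width * cpp, align);	/* 5120 */
		unsigned long size = (unsigned long)pitch * height * vres_mult;

		printf("cpp=%u pitch=%u size=%lu\n", cpp, pitch, size);
		return 0;
	}
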
+
+static const struct drm_fb_helper_funcs xilinx_drm_fb_helper_funcs = {
+ .fb_probe = xilinx_drm_fbdev_create,
+};
+
+/**
+ * xilinx_drm_fb_init - Allocate and initialize the Xilinx framebuffer helper
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or an ERR_PTR.
+ */
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult)
+{
+ struct xilinx_drm_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ DRM_ERROR("Failed to allocate drm fbdev.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fbdev->vres_mult = vres_mult;
+
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xilinx_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_ERROR("Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_ERROR("Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ drm_helper_disable_unused_functions(drm);
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_ERROR("Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * xilinx_drm_fb_fini - Free the Xilinx framebuffer helper
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xilinx_drm_fbdev *fbdev;
+
+ if (!fb_helper)
+ return;
+
+ fbdev = to_fbdev(fb_helper);
+ if (fbdev->fb_helper.fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = fbdev->fb_helper.fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(&fbdev->fb->base);
+ xilinx_drm_fb_destroy(&fbdev->fb->base);
+ }
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xilinx_drm_fb_restore_mode - Restores initial framebuffer mode
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_restore_mode() and usually called
+ * from the Xilinx DRM driver's lastclose callback.
+ */
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper)
+{
+ if (!fb_helper)
+ return;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+}
+
+/**
+ * xilinx_drm_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer for the given mode @mode_cmd. It
+ * is intended to be used as the fb_create callback of drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or ERR_PTR.
+ */
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct xilinx_drm_fb *fb;
+ struct drm_gem_cma_object *objs[4];
+ struct drm_gem_object *obj;
+ const struct drm_format_info *info;
+ struct drm_format_name_buf format_name;
+ unsigned int hsub;
+ unsigned int vsub;
+ int ret;
+ int i;
+
+ info = drm_format_info(mode_cmd->pixel_format);
+ if (!info) {
+ DRM_ERROR("Unsupported framebuffer format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name));
+ return ERR_PTR(-EINVAL);
+ }
+
+ hsub = info->hsub;
+ vsub = info->vsub;
+
+ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i] + width *
+ info->cpp[i] + mode_cmd->offsets[i];
+
+ if (obj->size < min_size) {
+ drm_gem_object_put_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = to_drm_gem_cma_obj(obj);
+ }
+
+ fb = xilinx_drm_fb_alloc(drm, mode_cmd, objs, i);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_unreference;
+ }
+
+ fb->base.format = info;
+
+ return &fb->base;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_put_unlocked(&objs[i]->base);
+ return ERR_PTR(ret);
+}
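
The size check above amounts to min_size = (height - 1) * pitch +
width * cpp + offset per plane, with width and height subsampled by
hsub/vsub for the chroma planes. A runnable sketch for a hypothetical
two-plane NV12-style layout:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 640x480 buffer: plane 0 is luma (cpp 1),
		 * plane 1 is interleaved chroma (cpp 2) subsampled 2x2 */
		unsigned int width = 640, height = 480, pitch = 640;
		unsigned int hsub = 2, vsub = 2;
		unsigned int min0 = (height - 1) * pitch + width * 1;
		unsigned int min1 = (height / vsub - 1) * pitch +
				    (width / hsub) * 2;

		printf("plane 0 min %u, plane 1 min %u bytes\n", min0, min1);
		return 0;
	}
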
+
+/**
+ * xilinx_drm_fb_hotplug_event - Poll for hotplug events
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_hotplug_event() and usually called
+ * from the Xilinx DRM driver's output_poll_changed callback.
+ */
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+ if (!fb_helper)
+ return;
+
+ drm_fb_helper_hotplug_event(fb_helper);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_fb.h b/drivers/gpu/drm/xilinx/xilinx_drm_fb.h
new file mode 100644
index 000000000000..c8b436edd08d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_fb.h
@@ -0,0 +1,38 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_FB_H_
+#define _XILINX_DRM_FB_H_
+
+struct drm_fb_helper;
+
+struct drm_gem_cma_object *
+xilinx_drm_fb_get_gem_obj(struct drm_framebuffer *base_fb, unsigned int plane);
+
+struct drm_fb_helper *
+xilinx_drm_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult);
+void xilinx_drm_fb_fini(struct drm_fb_helper *fb_helper);
+
+void xilinx_drm_fb_restore_mode(struct drm_fb_helper *fb_helper);
+struct drm_framebuffer *
+xilinx_drm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+void xilinx_drm_fb_hotplug_event(struct drm_fb_helper *fb_helper);
+
+#endif /* _XILINX_DRM_FB_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_gem.c b/drivers/gpu/drm/xilinx/xilinx_drm_gem.c
new file mode 100644
index 000000000000..b554c200ca09
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_gem.c
@@ -0,0 +1,45 @@
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_gem.h"
+
+/**
+ * xilinx_drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function implements the dumb_create callback of the drm_driver
+ * struct. It wraps drm_gem_cma_dumb_create_internal() and aligns the
+ * pitch to the value retrieved from the device.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create_internal()
+ */
+int xilinx_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ args->pitch = ALIGN(pitch, xilinx_drm_get_align(drm));
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
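
The ALIGN() step above only changes the pitch when width * cpp is not
already a multiple of the device alignment; a quick sketch (the alignment
value is an assumption):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* hypothetical 1023-pixel-wide RGB888 dumb buffer with a
		 * 32-byte alignment requirement */
		unsigned int pitch = DIV_ROUND_UP(1023 * 24, 8); /* 3069 */
		unsigned int aligned = ALIGN(pitch, 32);	 /* 3072 */

		printf("pitch %u -> %u\n", pitch, aligned);
		return 0;
	}
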
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_gem.h b/drivers/gpu/drm/xilinx/xilinx_drm_gem.h
new file mode 100644
index 000000000000..9e05e78cb766
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_gem.h
@@ -0,0 +1,25 @@
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_GEM_H_
+#define _XILINX_DRM_GEM_H_
+
+int xilinx_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _XILINX_DRM_GEM_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_plane.c b/drivers/gpu/drm/xilinx/xilinx_drm_plane.c
new file mode 100644
index 000000000000..8467f22f86af
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_plane.c
@@ -0,0 +1,1098 @@
+/*
+ * Xilinx DRM plane driver
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+
+#include "xilinx_drm_dp_sub.h"
+#include "xilinx_drm_drv.h"
+#include "xilinx_drm_fb.h"
+#include "xilinx_drm_plane.h"
+
+#include "xilinx_cresample.h"
+#include "xilinx_osd.h"
+#include "xilinx_rgb2yuv.h"
+
+#define MAX_NUM_SUB_PLANES 4
+
+/**
+ * struct xilinx_drm_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xilinx_drm_plane_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+ bool is_active;
+};
+
+/**
+ * struct xilinx_drm_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @zpos: user requested z-position value
+ * @prio: actual layer priority
+ * @alpha: alpha value
+ * @alpha_enable: alpha enable value
+ * @primary: flag for primary plane
+ * @format: pixel format
+ * @dma: dma object
+ * @rgb2yuv: rgb2yuv instance
+ * @cresample: cresample instance
+ * @osd_layer: osd layer
+ * @dp_layer: DisplayPort subsystem layer
+ * @manager: plane manager
+ */
+struct xilinx_drm_plane {
+ struct drm_plane base;
+ int id;
+ int dpms;
+ unsigned int zpos;
+ unsigned int prio;
+ unsigned int alpha;
+ unsigned int alpha_enable;
+ bool primary;
+ u32 format;
+ struct xilinx_drm_plane_dma dma[MAX_NUM_SUB_PLANES];
+ struct xilinx_rgb2yuv *rgb2yuv;
+ struct xilinx_cresample *cresample;
+ struct xilinx_osd_layer *osd_layer;
+ struct xilinx_drm_dp_sub_layer *dp_layer;
+ struct xilinx_drm_plane_manager *manager;
+};
+
+#define MAX_PLANES 8
+
+/**
+ * struct xilinx_drm_plane_manager - Xilinx drm plane manager object
+ *
+ * @drm: drm device
+ * @node: plane device node
+ * @osd: osd instance
+ * @dp_sub: DisplayPort subsystem instance
+ * @num_planes: number of available planes
+ * @format: video format
+ * @max_width: maximum width
+ * @zpos_prop: z-position(priority) property
+ * @alpha_prop: alpha value property
+ * @alpha_enable_prop: alpha enable property
+ * @default_alpha: default alpha value
+ * @planes: xilinx drm planes
+ */
+struct xilinx_drm_plane_manager {
+ struct drm_device *drm;
+ struct device_node *node;
+ struct xilinx_osd *osd;
+ struct xilinx_drm_dp_sub *dp_sub;
+ int num_planes;
+ u32 format;
+ int max_width;
+ struct drm_property *zpos_prop;
+ struct drm_property *alpha_prop;
+ struct drm_property *alpha_enable_prop;
+ unsigned int default_alpha;
+ struct xilinx_drm_plane *planes[MAX_PLANES];
+};
+
+#define to_xilinx_plane(x) container_of(x, struct xilinx_drm_plane, base)
+
+void xilinx_drm_plane_commit(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ /* for xilinx video framebuffer dma, if used */
+ xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++) {
+ struct xilinx_drm_plane_dma *dma = &plane->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt,
+ flags);
+ if (!desc) {
+ DRM_ERROR("failed to prepare DMA descriptor\n");
+ return;
+ }
+
+ dmaengine_submit(desc);
+
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+}
+
+/* set plane dpms */
+void xilinx_drm_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+ unsigned int i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+ if (plane->dpms == dpms)
+ return;
+
+ plane->dpms = dpms;
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (manager->dp_sub) {
+ if (plane->primary) {
+ xilinx_drm_dp_sub_enable_alpha(manager->dp_sub,
+ plane->alpha_enable);
+ xilinx_drm_dp_sub_set_alpha(manager->dp_sub,
+ plane->alpha);
+ }
+ xilinx_drm_dp_sub_layer_enable(manager->dp_sub,
+ plane->dp_layer);
+ }
+
+ if (plane->rgb2yuv)
+ xilinx_rgb2yuv_enable(plane->rgb2yuv);
+
+ if (plane->cresample)
+ xilinx_cresample_enable(plane->cresample);
+
+ /* enable osd */
+ if (manager->osd) {
+ xilinx_osd_disable_rue(manager->osd);
+
+ xilinx_osd_layer_set_priority(plane->osd_layer,
+ plane->prio);
+ xilinx_osd_layer_enable_alpha(plane->osd_layer,
+ plane->alpha_enable);
+ xilinx_osd_layer_set_alpha(plane->osd_layer,
+ plane->alpha);
+ xilinx_osd_layer_enable(plane->osd_layer);
+
+ xilinx_osd_enable_rue(manager->osd);
+ }
+
+ xilinx_drm_plane_commit(base_plane);
+ break;
+ default:
+ /* disable/reset osd */
+ if (manager->osd) {
+ xilinx_osd_disable_rue(manager->osd);
+
+ xilinx_osd_layer_set_dimension(plane->osd_layer,
+ 0, 0, 0, 0);
+ xilinx_osd_layer_disable(plane->osd_layer);
+
+ xilinx_osd_enable_rue(manager->osd);
+ }
+
+ if (plane->cresample) {
+ xilinx_cresample_disable(plane->cresample);
+ xilinx_cresample_reset(plane->cresample);
+ }
+
+ if (plane->rgb2yuv) {
+ xilinx_rgb2yuv_disable(plane->rgb2yuv);
+ xilinx_rgb2yuv_reset(plane->rgb2yuv);
+ }
+
+ /* stop dma engine and release descriptors */
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++) {
+ if (plane->dma[i].chan && plane->dma[i].is_active)
+ dmaengine_terminate_all(plane->dma[i].chan);
+ }
+
+ if (manager->dp_sub)
+ xilinx_drm_dp_sub_layer_disable(manager->dp_sub,
+ plane->dp_layer);
+
+ break;
+ }
+}
+
+/* mode set a plane */
+int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct drm_gem_cma_object *obj;
+ const struct drm_format_info *info;
+ size_t offset;
+ unsigned int hsub, vsub, i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+ /* configure cresample */
+ if (plane->cresample)
+ xilinx_cresample_configure(plane->cresample, crtc_w, crtc_h);
+
+ /* configure rgb2yuv */
+ if (plane->rgb2yuv)
+ xilinx_rgb2yuv_configure(plane->rgb2yuv, crtc_w, crtc_h);
+
+ DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n",
+ src_w, crtc_x, src_h, crtc_y);
+ DRM_DEBUG_KMS("bpp: %d\n", fb->format->cpp[0] * 8);
+
+ info = fb->format;
+ if (!info) {
+ DRM_ERROR("Unsupported framebuffer format %s\n",
+ drm_get_format_name(info->format, &format_name));
+ return -EINVAL;
+ }
+
+ hsub = info->hsub;
+ vsub = info->vsub;
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? hsub : 1);
+ unsigned int height = src_h / (i ? vsub : 1);
+ unsigned int cpp = info->cpp[i];
+
+ if (!cpp)
+ cpp = xilinx_drm_format_bpp(fb->format->format) >> 3;
+
+ obj = xilinx_drm_fb_get_gem_obj(fb, i);
+ if (!obj) {
+ DRM_ERROR("failed to get a gem obj for fb\n");
+ return -EINVAL;
+ }
+
+ plane->dma[i].xt.numf = height;
+ plane->dma[i].sgl[0].size = drm_format_plane_width_bytes(info,
+ i,
+ width);
+ plane->dma[i].sgl[0].icg = fb->pitches[i] -
+ plane->dma[i].sgl[0].size;
+ offset = drm_format_plane_width_bytes(info, i, src_x);
+ offset += src_y * fb->pitches[i];
+ offset += fb->offsets[i];
+ plane->dma[i].xt.src_start = obj->paddr + offset;
+ plane->dma[i].xt.frame_size = 1;
+ plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ plane->dma[i].xt.src_sgl = true;
+ plane->dma[i].xt.dst_sgl = false;
+ plane->dma[i].is_active = true;
+ }
+
+ for (; i < MAX_NUM_SUB_PLANES; i++)
+ plane->dma[i].is_active = false;
+
+ /* set OSD dimensions */
+ if (plane->manager->osd) {
+ xilinx_osd_disable_rue(plane->manager->osd);
+
+ xilinx_osd_layer_set_dimension(plane->osd_layer, crtc_x, crtc_y,
+ src_w, src_h);
+
+ xilinx_osd_enable_rue(plane->manager->osd);
+ }
+
+ if (plane->manager->dp_sub) {
+ int ret;
+
+ ret = xilinx_drm_dp_sub_layer_check_size(plane->manager->dp_sub,
+ plane->dp_layer,
+ src_w, src_h);
+ if (ret)
+ return ret;
+
+ ret = xilinx_drm_dp_sub_layer_set_fmt(plane->manager->dp_sub,
+ plane->dp_layer,
+ fb->format->format);
+ if (ret) {
+ DRM_ERROR("failed to set dp_sub layer fmt\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
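
The interleaved template filled in above describes a frame as xt.numf lines
of sgl[0].size bytes each, separated by an inter-chunk gap (icg) that skips
the unused tail of every framebuffer line. A standalone sketch of that
arithmetic with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical 800x600 XRGB8888 source rectangle at
		 * (100, 50) inside a 1024-pixel-wide buffer (cpp = 4) */
		unsigned int cpp = 4, pitch = 1024 * cpp;
		unsigned int src_x = 100, src_y = 50;
		unsigned int src_w = 800, src_h = 600;
		unsigned int size = src_w * cpp;	/* bytes per line: 3200 */
		unsigned int icg = pitch - size;	/* gap per line: 896 */
		unsigned long offset = src_x * cpp +
				       (unsigned long)src_y * pitch;

		printf("numf=%u size=%u icg=%u start offset=%lu\n",
		       src_h, size, icg, offset);
		return 0;
	}
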
+
+/* update a plane. just call mode_set() with bit-shifted values */
+static int xilinx_drm_plane_update(struct drm_plane *base_plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ int ret;
+
+ ret = xilinx_drm_plane_mode_set(base_plane, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+ if (ret) {
+ DRM_ERROR("failed to mode-set a plane\n");
+ return ret;
+ }
+
+ /* make sure a plane is on */
+ if (plane->dpms != DRM_MODE_DPMS_ON)
+ xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_ON);
+ else
+ xilinx_drm_plane_commit(base_plane);
+
+ return 0;
+}
+
+/* disable a plane */
+static int xilinx_drm_plane_disable(struct drm_plane *base_plane,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_OFF);
+
+ return 0;
+}
+
+/* destroy a plane */
+static void xilinx_drm_plane_destroy(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ unsigned int i;
+
+ xilinx_drm_plane_dpms(base_plane, DRM_MODE_DPMS_OFF);
+
+ plane->manager->planes[plane->id] = NULL;
+
+ drm_plane_cleanup(base_plane);
+
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+
+ if (plane->manager->osd) {
+ xilinx_osd_layer_disable(plane->osd_layer);
+ xilinx_osd_layer_put(plane->osd_layer);
+ }
+
+ if (plane->manager->dp_sub) {
+ xilinx_drm_dp_sub_layer_disable(plane->manager->dp_sub,
+ plane->dp_layer);
+ xilinx_drm_dp_sub_layer_put(plane->manager->dp_sub,
+ plane->dp_layer);
+ }
+}
+
+/**
+ * xilinx_drm_plane_update_prio - Configure plane priorities based on zpos
+ * @manager: the plane manager
+ *
+ * Z-position values are the user-requested positions of planes. The priority
+ * is the actual position of planes in hardware. Some hardware doesn't allow
+ * any duplicate priority, so this function needs to be called when a duplicate
+ * priority is found. Planes are then sorted by zpos value, and the priorities
+ * are reconfigured. A plane with a lower plane ID is assigned the lower
+ * priority when planes have the same zpos value.
+ */
+static void
+xilinx_drm_plane_update_prio(struct xilinx_drm_plane_manager *manager)
+{
+ struct xilinx_drm_plane *planes[MAX_PLANES];
+ struct xilinx_drm_plane *plane;
+ unsigned int i, j;
+
+ /* sort planes by zpos */
+ for (i = 0; i < manager->num_planes; i++) {
+ plane = manager->planes[i];
+
+ for (j = i; j > 0; --j) {
+ if (planes[j - 1]->zpos <= plane->zpos)
+ break;
+ planes[j] = planes[j - 1];
+ }
+
+ planes[j] = plane;
+ }
+
+ xilinx_osd_disable_rue(manager->osd);
+
+ /* remove duplicates by reassigning priority */
+ for (i = 0; i < manager->num_planes; i++) {
+ planes[i]->prio = i;
+ xilinx_osd_layer_set_priority(planes[i]->osd_layer,
+ planes[i]->prio);
+ }
+
+ xilinx_osd_enable_rue(manager->osd);
+}
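
A standalone sketch of the reassignment described above: three hypothetical
planes are stable-sorted by zpos (so ties keep plane-ID order) and then
handed priorities 0..n-1 in sorted order:

	#include <stdio.h>

	struct plane { int id; int zpos; int prio; };

	int main(void)
	{
		struct plane p[3] = { { 0, 2, 0 }, { 1, 2, 0 }, { 2, 0, 0 } };
		struct plane sorted[3];
		int i, j;

		for (i = 0; i < 3; i++) {	/* stable insertion sort */
			struct plane cur = p[i];

			for (j = i; j > 0 && sorted[j - 1].zpos > cur.zpos; j--)
				sorted[j] = sorted[j - 1];
			sorted[j] = cur;
		}

		for (i = 0; i < 3; i++) {
			sorted[i].prio = i;
			printf("plane %d: zpos %d -> prio %d\n",
			       sorted[i].id, sorted[i].zpos, sorted[i].prio);
		}
		return 0;
	}

Here plane 2 (zpos 0) ends up with priority 0, and the zpos tie between
planes 0 and 1 is broken in favor of the lower plane ID, matching the
behavior documented above.
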
+
+static void xilinx_drm_plane_set_zpos(struct drm_plane *base_plane,
+ unsigned int zpos)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+ bool update = false;
+ int i;
+
+ for (i = 0; i < manager->num_planes; i++) {
+ if (manager->planes[i] != plane &&
+ manager->planes[i]->prio == zpos) {
+ update = true;
+ break;
+ }
+ }
+
+ plane->zpos = zpos;
+
+ if (update) {
+ xilinx_drm_plane_update_prio(manager);
+ } else {
+ plane->prio = zpos;
+ xilinx_osd_layer_set_priority(plane->osd_layer, plane->prio);
+ }
+}
+
+static void xilinx_drm_plane_set_alpha(struct drm_plane *base_plane,
+ unsigned int alpha)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ plane->alpha = alpha;
+
+ if (plane->osd_layer)
+ xilinx_osd_layer_set_alpha(plane->osd_layer, plane->alpha);
+ else if (manager->dp_sub)
+ xilinx_drm_dp_sub_set_alpha(manager->dp_sub, plane->alpha);
+}
+
+static void xilinx_drm_plane_enable_alpha(struct drm_plane *base_plane,
+ bool enable)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ plane->alpha_enable = enable;
+
+ if (plane->osd_layer)
+ xilinx_osd_layer_enable_alpha(plane->osd_layer, enable);
+ else if (manager->dp_sub)
+ xilinx_drm_dp_sub_enable_alpha(manager->dp_sub, enable);
+}
+
+/* set property of a plane */
+static int xilinx_drm_plane_set_property(struct drm_plane *base_plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ if (property == manager->zpos_prop)
+ xilinx_drm_plane_set_zpos(base_plane, val);
+ else if (property == manager->alpha_prop)
+ xilinx_drm_plane_set_alpha(base_plane, val);
+ else if (property == manager->alpha_enable_prop)
+ xilinx_drm_plane_enable_alpha(base_plane, val);
+ else
+ return -EINVAL;
+
+ drm_object_property_set_value(&base_plane->base, property, val);
+
+ return 0;
+}
+
+static const struct drm_plane_funcs xilinx_drm_plane_funcs = {
+ .update_plane = xilinx_drm_plane_update,
+ .disable_plane = xilinx_drm_plane_disable,
+ .destroy = xilinx_drm_plane_destroy,
+ .set_property = xilinx_drm_plane_set_property,
+};
+
+/* get a plane max width */
+int xilinx_drm_plane_get_max_width(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+ return plane->manager->max_width;
+}
+
+/* check if format is supported */
+bool xilinx_drm_plane_check_format(struct xilinx_drm_plane_manager *manager,
+ u32 format)
+{
+ int i;
+
+ for (i = 0; i < MAX_PLANES; i++)
+ if (manager->planes[i] &&
+ (manager->planes[i]->format == format))
+ return true;
+
+ return false;
+}
+
+/* get the number of planes */
+int xilinx_drm_plane_get_num_planes(struct xilinx_drm_plane_manager *manager)
+{
+ return manager->num_planes;
+}
+
+/**
+ * xilinx_drm_plane_restore - Restore the plane states
+ * @manager: the plane manager
+ *
+ * Restore the plane states to their defaults. Any state that needs to be
+ * restored should be handled here. This improves consistency as applications
+ * see the same default values, and it avoids a mismatch between software and
+ * hardware values when hardware values are reset.
+ */
+void xilinx_drm_plane_restore(struct xilinx_drm_plane_manager *manager)
+{
+ struct xilinx_drm_plane *plane;
+ unsigned int i;
+
+ /*
+ * Reinitialize property default values as they get reset by DPMS OFF
+ * operation. User will read the correct default values later, and
+ * planes will be initialized with default values.
+ */
+ for (i = 0; i < manager->num_planes; i++) {
+ plane = manager->planes[i];
+
+ plane->prio = plane->id;
+ plane->zpos = plane->id;
+ if (manager->zpos_prop)
+ drm_object_property_set_value(&plane->base.base,
+ manager->zpos_prop,
+ plane->prio);
+
+ plane->alpha = manager->default_alpha;
+ if (manager->alpha_prop)
+ drm_object_property_set_value(&plane->base.base,
+ manager->alpha_prop,
+ plane->alpha);
+
+ plane->alpha_enable = true;
+ if (manager->alpha_enable_prop)
+ drm_object_property_set_value(&plane->base.base,
+ manager->alpha_enable_prop, true);
+ }
+}
+
+/* get the plane format */
+u32 xilinx_drm_plane_get_format(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+ return plane->format;
+}
+
+/**
+ * xilinx_drm_plane_get_align - Get the alignment value for pitch
+ * @base_plane: Base drm plane object
+ *
+ * Get the pitch alignment value from the DMA device.
+ *
+ * Return: The pitch alignment value.
+ */
+unsigned int xilinx_drm_plane_get_align(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+
+ return 1 << plane->dma[0].chan->device->copy_align;
+}
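
copy_align is a log2 value reported by the DMA engine, so the returned
alignment is always a power of two; for instance (the engine value here is
an assumption):

	#include <stdio.h>

	int main(void)
	{
		unsigned int copy_align = 3;	/* hypothetical log2 value */

		printf("pitch alignment: %u bytes\n", 1U << copy_align);
		return 0;
	}
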
+
+/* create plane properties */
+static void
+xilinx_drm_plane_create_property(struct xilinx_drm_plane_manager *manager)
+{
+ if (manager->osd)
+ manager->zpos_prop = drm_property_create_range(manager->drm, 0,
+ "zpos", 0, manager->num_planes - 1);
+
+ if (manager->osd || manager->dp_sub) {
+ manager->alpha_prop = drm_property_create_range(manager->drm, 0,
+ "alpha", 0, manager->default_alpha);
+ manager->alpha_enable_prop =
+ drm_property_create_bool(manager->drm, 0,
+ "global alpha enable");
+ }
+}
+
+/* attach plane properties */
+static void xilinx_drm_plane_attach_property(struct drm_plane *base_plane)
+{
+ struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
+ struct xilinx_drm_plane_manager *manager = plane->manager;
+
+ if (manager->zpos_prop)
+ drm_object_attach_property(&base_plane->base,
+ manager->zpos_prop,
+ plane->id);
+
+ if (manager->alpha_prop) {
+ if (manager->dp_sub && !plane->primary)
+ return;
+
+ drm_object_attach_property(&base_plane->base,
+ manager->alpha_prop,
+ manager->default_alpha);
+ drm_object_attach_property(&base_plane->base,
+ manager->alpha_enable_prop, false);
+ }
+
+ plane->alpha_enable = true;
+}
+
+/**
+ * xilinx_drm_plane_manager_dpms - Set DPMS for the Xilinx plane manager
+ * @manager: Xilinx plane manager object
+ * @dpms: requested DPMS
+ *
+ * Set the Xilinx plane manager to the given DPMS state. This function is
+ * usually called from the CRTC driver together with xilinx_drm_plane_dpms().
+ */
+void xilinx_drm_plane_manager_dpms(struct xilinx_drm_plane_manager *manager,
+ int dpms)
+{
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (manager->dp_sub) {
+ xilinx_drm_dp_sub_set_bg_color(manager->dp_sub,
+ 0, 0, 0);
+ xilinx_drm_dp_sub_enable(manager->dp_sub);
+ }
+
+ if (manager->osd) {
+ xilinx_osd_disable_rue(manager->osd);
+ xilinx_osd_enable(manager->osd);
+ xilinx_osd_enable_rue(manager->osd);
+ }
+
+ break;
+ default:
+ if (manager->osd)
+ xilinx_osd_reset(manager->osd);
+
+ if (manager->dp_sub)
+ xilinx_drm_dp_sub_disable(manager->dp_sub);
+
+ break;
+ }
+}
+
+/**
+ * xilinx_drm_plane_manager_mode_set - Set the mode to the Xilinx plane manager
+ * @manager: Xilinx plane manager object
+ * @crtc_w: CRTC width
+ * @crtc_h: CRTC height
+ *
+ * Set the width and height of the Xilinx plane manager. This function is
+ * usually called from the CRTC driver before calling
+ * xilinx_drm_plane_mode_set().
+ */
+void xilinx_drm_plane_manager_mode_set(struct xilinx_drm_plane_manager *manager,
+ unsigned int crtc_w, unsigned int crtc_h)
+{
+ if (manager->osd)
+ xilinx_osd_set_dimension(manager->osd, crtc_w, crtc_h);
+}
+
+/* create a plane */
+static struct xilinx_drm_plane *
+xilinx_drm_plane_create(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs, bool primary)
+{
+ struct xilinx_drm_plane *plane;
+ struct device *dev = manager->drm->dev;
+ char plane_name[16];
+ struct device_node *plane_node;
+ struct device_node *sub_node;
+ struct property *prop;
+ const char *dma_name;
+ enum drm_plane_type type;
+ u32 fmt_in = 0;
+ u32 fmt_out = 0;
+ const char *fmt;
+ int i;
+ int ret;
+ u32 *fmts = NULL;
+ unsigned int num_fmts = 0;
+
+ for (i = 0; i < manager->num_planes; i++)
+ if (!manager->planes[i])
+ break;
+
+ if (i >= manager->num_planes) {
+ DRM_ERROR("failed to allocate plane\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ snprintf(plane_name, sizeof(plane_name), "plane%d", i);
+ plane_node = of_get_child_by_name(manager->node, plane_name);
+ if (!plane_node) {
+ DRM_ERROR("failed to find a plane node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ plane->primary = primary;
+ plane->id = i;
+ plane->prio = i;
+ plane->zpos = i;
+ plane->alpha = manager->default_alpha;
+ plane->dpms = DRM_MODE_DPMS_OFF;
+ plane->format = 0;
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+
+ i = 0;
+ of_property_for_each_string(plane_node, "dma-names", prop, dma_name) {
+ if (i >= MAX_NUM_SUB_PLANES) {
+ DRM_WARN("%s contains too many sub-planes (dma-names), indexes %d and above ignored\n",
+ of_node_full_name(plane_node),
+ MAX_NUM_SUB_PLANES);
+ break;
+ }
+ plane->dma[i].chan = of_dma_request_slave_channel(plane_node,
+ dma_name);
+ if (IS_ERR(plane->dma[i].chan)) {
+ ret = PTR_ERR(plane->dma[i].chan);
+ DRM_ERROR("failed to request dma channel \"%s\" for plane %s (err:%d)\n",
+ dma_name, of_node_full_name(plane_node), ret);
+ plane->dma[i].chan = NULL;
+ goto err_dma;
+ }
+ ++i;
+ }
+
+ if (i == 0) {
+ DRM_ERROR("plane \"%s\" doesn't have any dma channels (dma-names)\n",
+ of_node_full_name(plane_node));
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* probe color space converter */
+ sub_node = of_parse_phandle(plane_node, "xlnx,rgb2yuv", i);
+ if (sub_node) {
+ plane->rgb2yuv = xilinx_rgb2yuv_probe(dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(plane->rgb2yuv)) {
+ DRM_ERROR("failed to probe a rgb2yuv\n");
+ ret = PTR_ERR(plane->rgb2yuv);
+ goto err_dma;
+ }
+
+ /* rgb2yuv input format */
+ plane->format = DRM_FORMAT_XRGB8888;
+
+ /* rgb2yuv output format */
+ fmt_out = DRM_FORMAT_YUV444;
+ }
+
+ /* probe chroma resampler */
+ sub_node = of_parse_phandle(plane_node, "xlnx,cresample", i);
+ if (sub_node) {
+ plane->cresample = xilinx_cresample_probe(dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(plane->cresample)) {
+ DRM_ERROR("failed to probe a cresample\n");
+ ret = PTR_ERR(plane->cresample);
+ goto err_dma;
+ }
+
+ /* cresample input format */
+ fmt = xilinx_cresample_get_input_format_name(plane->cresample);
+ ret = xilinx_drm_format_by_name(fmt, &fmt_in);
+ if (ret)
+ goto err_dma;
+
+ /* format sanity check */
+ if ((fmt_out != 0) && (fmt_out != fmt_in)) {
+ DRM_ERROR("input/output format mismatch\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ if (plane->format == 0)
+ plane->format = fmt_in;
+
+ /* cresample output format */
+ fmt = xilinx_cresample_get_output_format_name(plane->cresample);
+ ret = xilinx_drm_format_by_name(fmt, &fmt_out);
+ if (ret)
+ goto err_dma;
+ }
+
+ /* create an OSD layer when OSD is available */
+ if (manager->osd) {
+ /* format sanity check */
+ if ((fmt_out != 0) && (fmt_out != manager->format)) {
+ DRM_ERROR("input/output format mismatch\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ /* create an osd layer */
+ plane->osd_layer = xilinx_osd_layer_get(manager->osd);
+ if (IS_ERR(plane->osd_layer)) {
+ DRM_ERROR("failed to create a osd layer\n");
+ ret = PTR_ERR(plane->osd_layer);
+ plane->osd_layer = NULL;
+ goto err_dma;
+ }
+
+ if (plane->format == 0)
+ plane->format = manager->format;
+ }
+
+ if (manager->dp_sub) {
+ plane->dp_layer = xilinx_drm_dp_sub_layer_get(manager->dp_sub,
+ primary);
+ if (IS_ERR(plane->dp_layer)) {
+ DRM_ERROR("failed to create a dp_sub layer\n");
+ ret = PTR_ERR(plane->dp_layer);
+ plane->dp_layer = NULL;
+ goto err_dma;
+ }
+
+ if (primary) {
+ ret = xilinx_drm_dp_sub_layer_set_fmt(manager->dp_sub,
+ plane->dp_layer,
+ manager->format);
+ if (ret) {
+ DRM_ERROR("failed to set dp_sub layer fmt\n");
+ goto err_dma;
+ }
+ }
+
+ plane->format =
+ xilinx_drm_dp_sub_layer_get_fmt(manager->dp_sub,
+ plane->dp_layer);
+ xilinx_drm_dp_sub_layer_get_fmts(manager->dp_sub,
+ plane->dp_layer, &fmts,
+ &num_fmts);
+ }
+
+ /* If there's no IP other than VDMA, pick the manager's format */
+ if (plane->format == 0)
+ plane->format = manager->format;
+
+ /* initialize drm plane */
+ type = primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(manager->drm, &plane->base,
+ possible_crtcs, &xilinx_drm_plane_funcs,
+ fmts ? fmts : &plane->format,
+ num_fmts ? num_fmts : 1, NULL, type,
+ NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_init;
+ }
+ plane->manager = manager;
+ manager->planes[plane->id] = plane;
+
+ xilinx_drm_plane_attach_property(&plane->base);
+
+ of_node_put(plane_node);
+
+ return plane;
+
+err_init:
+ if (manager->dp_sub) {
+ xilinx_drm_dp_sub_layer_disable(manager->dp_sub,
+ plane->dp_layer);
+ xilinx_drm_dp_sub_layer_put(plane->manager->dp_sub,
+ plane->dp_layer);
+ }
+ if (manager->osd) {
+ xilinx_osd_layer_disable(plane->osd_layer);
+ xilinx_osd_layer_put(plane->osd_layer);
+ }
+err_dma:
+ for (i = 0; i < MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+err_out:
+ of_node_put(plane_node);
+ return ERR_PTR(ret);
+}
+
+/* create a primary plane */
+struct drm_plane *
+xilinx_drm_plane_create_primary(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs)
+{
+ struct xilinx_drm_plane *plane;
+
+ plane = xilinx_drm_plane_create(manager, possible_crtcs, true);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to allocate a primary plane\n");
+ return ERR_CAST(plane);
+ }
+
+ return &plane->base;
+}
+
+/* create extra planes */
+int xilinx_drm_plane_create_planes(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs)
+{
+ struct xilinx_drm_plane *plane;
+ int i;
+
+ /* create a plane for each remaining available slot */
+ for (i = 0; i < manager->num_planes; i++) {
+ if (manager->planes[i])
+ continue;
+
+ plane = xilinx_drm_plane_create(manager, possible_crtcs, false);
+ if (IS_ERR(plane)) {
+ DRM_ERROR("failed to allocate a plane\n");
+ return PTR_ERR(plane);
+ }
+
+ manager->planes[i] = plane;
+ }
+
+ return 0;
+}
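+
+/*
+ * Example (illustrative sketch): a typical caller creates the primary
+ * plane first and then fills in the remaining overlay planes, e.g. from
+ * a CRTC init path:
+ *
+ *	primary = xilinx_drm_plane_create_primary(manager, possible_crtcs);
+ *	if (IS_ERR(primary))
+ *		return PTR_ERR(primary);
+ *
+ *	ret = xilinx_drm_plane_create_planes(manager, possible_crtcs);
+ *	if (ret)
+ *		return ret;
+ */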
+
+/* initialize a plane manager: num_planes, format, max_width */
+static int
+xilinx_drm_plane_init_manager(struct xilinx_drm_plane_manager *manager)
+{
+ unsigned int format;
+ u32 drm_format;
+ int ret = 0;
+
+ if (manager->osd) {
+ manager->num_planes = xilinx_osd_get_num_layers(manager->osd);
+ manager->max_width = xilinx_osd_get_max_width(manager->osd);
+
+ format = xilinx_osd_get_format(manager->osd);
+ ret = xilinx_drm_format_by_code(format, &drm_format);
+ if (drm_format != manager->format)
+ ret = -EINVAL;
+ } else if (manager->dp_sub) {
+ manager->num_planes = XILINX_DRM_DP_SUB_NUM_LAYERS;
+ manager->max_width = XILINX_DRM_DP_SUB_MAX_WIDTH;
+ } else {
+ /* without osd, only one plane is supported */
+ manager->num_planes = 1;
+ manager->max_width = 4096;
+ }
+
+ return ret;
+}
+
+struct xilinx_drm_plane_manager *
+xilinx_drm_plane_probe_manager(struct drm_device *drm)
+{
+ struct xilinx_drm_plane_manager *manager;
+ struct device *dev = drm->dev;
+ struct device_node *sub_node;
+ const char *format;
+ int ret;
+
+ manager = devm_kzalloc(dev, sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return ERR_PTR(-ENOMEM);
+
+ /* this node is used to create a plane */
+ manager->node = of_get_child_by_name(dev->of_node, "planes");
+ if (!manager->node) {
+ DRM_ERROR("failed to get a planes node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* check the base pixel format of plane manager */
+ ret = of_property_read_string(manager->node, "xlnx,pixel-format",
+ &format);
+ if (ret < 0) {
+ DRM_ERROR("failed to get a plane manager format\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = xilinx_drm_format_by_name(format, &manager->format);
+ if (ret < 0) {
+ DRM_ERROR("invalid plane manager format\n");
+ return ERR_PTR(ret);
+ }
+
+ manager->drm = drm;
+
+ /* probe an OSD. proceed even if there's no OSD */
+ sub_node = of_parse_phandle(dev->of_node, "xlnx,osd", 0);
+ if (sub_node) {
+ manager->osd = xilinx_osd_probe(dev, sub_node);
+ of_node_put(sub_node);
+ if (IS_ERR(manager->osd)) {
+ of_node_put(manager->node);
+ DRM_ERROR("failed to probe an osd\n");
+ return ERR_CAST(manager->osd);
+ }
+ manager->default_alpha = OSD_MAX_ALPHA;
+ }
+
+ manager->dp_sub = xilinx_drm_dp_sub_of_get(drm->dev->of_node);
+ if (IS_ERR(manager->dp_sub)) {
+ DRM_DEBUG_KMS("failed to get a dp_sub\n");
+ return ERR_CAST(manager->dp_sub);
+ } else if (manager->dp_sub) {
+ manager->default_alpha = XILINX_DRM_DP_SUB_MAX_ALPHA;
+ }
+
+ ret = xilinx_drm_plane_init_manager(manager);
+ if (ret) {
+ DRM_ERROR("failed to init a plane manager\n");
+ return ERR_PTR(ret);
+ }
+
+ xilinx_drm_plane_create_property(manager);
+
+ return manager;
+}
+
+void xilinx_drm_plane_remove_manager(struct xilinx_drm_plane_manager *manager)
+{
+ xilinx_drm_dp_sub_put(manager->dp_sub);
+ of_node_put(manager->node);
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_plane.h b/drivers/gpu/drm/xilinx/xilinx_drm_plane.h
new file mode 100644
index 000000000000..3d3616b5a9d1
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_plane.h
@@ -0,0 +1,61 @@
+/*
+ * Xilinx DRM plane header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_PLANE_H_
+#define _XILINX_DRM_PLANE_H_
+
+struct drm_crtc;
+struct drm_plane;
+
+/* plane operations */
+void xilinx_drm_plane_dpms(struct drm_plane *base_plane, int dpms);
+void xilinx_drm_plane_commit(struct drm_plane *base_plane);
+int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h);
+int xilinx_drm_plane_get_max_width(struct drm_plane *base_plane);
+u32 xilinx_drm_plane_get_format(struct drm_plane *base_plane);
+unsigned int xilinx_drm_plane_get_align(struct drm_plane *base_plane);
+
+/* plane manager operations */
+struct xilinx_drm_plane_manager;
+
+void
+xilinx_drm_plane_manager_mode_set(struct xilinx_drm_plane_manager *manager,
+ unsigned int crtc_w, unsigned int crtc_h);
+void xilinx_drm_plane_manager_dpms(struct xilinx_drm_plane_manager *manager,
+ int dpms);
+struct drm_plane *
+xilinx_drm_plane_create_primary(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs);
+int xilinx_drm_plane_create_planes(struct xilinx_drm_plane_manager *manager,
+ unsigned int possible_crtcs);
+
+bool xilinx_drm_plane_check_format(struct xilinx_drm_plane_manager *manager,
+ u32 format);
+int xilinx_drm_plane_get_num_planes(struct xilinx_drm_plane_manager *manager);
+
+void xilinx_drm_plane_restore(struct xilinx_drm_plane_manager *manager);
+
+struct xilinx_drm_plane_manager *
+xilinx_drm_plane_probe_manager(struct drm_device *drm);
+void xilinx_drm_plane_remove_manager(struct xilinx_drm_plane_manager *manager);
+
+#endif /* _XILINX_DRM_PLANE_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c
new file mode 100644
index 000000000000..c33b3dfb6809
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.c
@@ -0,0 +1,1452 @@
+/*
+ * Xilinx FPGA SDI Tx Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xilinx_drm_sdi.h"
+#include "xilinx_vtc.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_MDL_EN_MASK BIT(0)
+#define XSDI_TX_CTRL_OUT_EN_MASK BIT(1)
+#define XSDI_TX_CTRL_M_MASK BIT(7)
+#define XSDI_TX_CTRL_INS_CRC_MASK BIT(12)
+#define XSDI_TX_CTRL_INS_ST352_MASK BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352_MASK BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT_MASK BIT(16)
+#define XSDI_TX_CTRL_SD_BITREP_BYPASS_MASK BIT(17)
+#define XSDI_TX_CTRL_USE_ANC_IN_MASK BIT(18)
+#define XSDI_TX_CTRL_INS_LN_MASK BIT(19)
+#define XSDI_TX_CTRL_INS_EDH_MASK BIT(20)
+#define XSDI_TX_CTRL_MODE_MASK 0x7
+#define XSDI_TX_CTRL_MUX_MASK 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_INS_CRC_SHIFT 12
+#define XSDI_TX_CTRL_INS_ST352_SHIFT 13
+#define XSDI_TX_CTRL_OVR_ST352_SHIFT 14
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_INS_SYNC_BIT_SHIFT 16
+#define XSDI_TX_CTRL_SD_BITREP_BYPASS_SHIFT 17
+#define XSDI_TX_CTRL_USE_ANC_IN_SHIFT 18
+#define XSDI_TX_CTRL_INS_LN_SHIFT 19
+#define XSDI_TX_CTRL_INS_EDH_SHIFT 20
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR_MASK BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR_MASK BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR_MASK BIT(8)
+#define XSDI_OVERFLOW_INTR_MASK BIT(9)
+#define XSDI_UNDERFLOW_INTR_MASK BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR_MASK | \
+ XSDI_TX_CE_ALIGN_ERR_INTR_MASK | \
+ XSDI_OVERFLOW_INTR_MASK | \
+ XSDI_UNDERFLOW_INTR_MASK)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_BRIDGE_CTRL_EN_MASK BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN_MASK BIT(9)
+#define XSDI_TX_CTRL_EN_MASK BIT(0)
+
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_DONE_MASK BIT(0)
+#define XSDI_TX_TDATA_FAIL_MASK BIT(1)
+#define XSDI_TX_TDATA_GT_RESETDONE_MASK BIT(2)
+#define XSDI_TX_TDATA_SLEW_RATE_MASK BIT(3)
+#define XSDI_TX_TDATA_TXPLLCLKSEL_MASK GENMASK(5, 4)
+#define XSDI_TX_TDATA_GT_SYSCLKSEL_MASK GENMASK(7, 6)
+#define XSDI_TX_TDATA_FABRIC_RST_MASK BIT(8)
+#define XSDI_TX_TDATA_DRP_FAIL_MASK BIT(9)
+#define XSDI_TX_TDATA_FAIL_CODE_MASK GENMASK(14, 12)
+#define XSDI_TX_TDATA_DRP_FAIL_CNT_MASK 0xFF0000
+#define XSDI_TX_TDATA_GT_QPLL0LOCK_MASK BIT(24)
+#define XSDI_TX_TDATA_GT_QPLL1LOCK_MASK BIT(25)
+
+#define SDI_MAX_DATASTREAM 8
+
+#define XSDI_TX_MUX_SD_HD_3GA 0
+#define XSDI_TX_MUX_3GB 1
+#define XSDI_TX_MUX_8STREAM_6G_12G 2
+#define XSDI_TX_MUX_4STREAM_6G 3
+#define XSDI_TX_MUX_16STREAM_12G 4
+
+#define PIXELS_PER_CLK 2
+#define XSDI_CH_SHIFT 29
+#define XST352_PROG_PIC_MASK BIT(6)
+#define XST352_PROG_TRANS_MASK BIT(7)
+#define XST352_2048_SHIFT BIT(6)
+#define ST352_BYTE3 0x00
+#define ST352_BYTE4 0x01
+#define INVALID_VALUE -1
+#define GT_TIMEOUT 500
+
+static LIST_HEAD(xilinx_sdi_list);
+static DEFINE_MUTEX(xilinx_sdi_lock);
+/**
+ * enum payload_line_1 - ST352 payload line numbers for field 1
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD,3G,6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ */
+enum payload_line_1 {
+ PAYLD_LN1_HD_3_6_12G = 10,
+ PAYLD_LN1_SDPAL = 9,
+ PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - ST352 payload line numbers for field 2
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD,3G,6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ */
+enum payload_line_2 {
+ PAYLD_LN2_HD_3_6_12G = 572,
+ PAYLD_LN2_SDPAL = 322,
+ PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * enum sdi_modes - SDI modes
+ * @XSDI_MODE_HD: HD mode
+ * @XSDI_MODE_SD: SD mode
+ * @XSDI_MODE_3GA: 3GA mode
+ * @XSDI_MODE_3GB: 3GB mode
+ * @XSDI_MODE_6G: 6G mode
+ * @XSDI_MODE_12G: 12G mode
+ */
+enum sdi_modes {
+ XSDI_MODE_HD = 0,
+ XSDI_MODE_SD,
+ XSDI_MODE_3GA,
+ XSDI_MODE_3GB,
+ XSDI_MODE_6G,
+ XSDI_MODE_12G
+};
+
+/**
+ * struct xilinx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @vtc: Pointer to VTC structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @list: entry in the global SDI subsystem list
+ * @vblank_fn: vblank handler
+ * @vblank_data: vblank data to be used in vblank_fn
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ * 0 - HD
+ * 1 - SD
+ * 2 - 3GA
+ * 3 - 3GB
+ * 4 - 6G
+ * 5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams;
+ * currently supported values are 2, 4 and 8
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ */
+struct xilinx_sdi {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct xilinx_vtc *vtc;
+ struct device *dev;
+ void __iomem *base;
+ u32 mode_flags;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ struct list_head list;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+ struct drm_property *sdi_mode;
+ u32 sdi_mod_prop_val;
+ struct drm_property *sdi_data_strm;
+ u32 sdi_data_strm_prop_val;
+ struct drm_property *is_frac_prop;
+ bool is_frac_prop_val;
+};
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ * index 0 : value for integral fps
+ * index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ * index 0 : value for HD mode
+ * index 1 : value for SD mode
+ * index 2 : value for 3GA
+ * index 3 : value for 3GB
+ * index 4 : value for 6G
+ * index 5 : value for 12G
+ */
+struct xlnx_sdi_display_config {
+ struct drm_display_mode mode;
+ u8 st352_byt2[2];
+ u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - SDI DRM modes
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x480i@60Hz */
+ {{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+#define connector_to_sdi(c) container_of(c, struct xilinx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xilinx_sdi, encoder)
+
+/**
+ * xilinx_sdi_writel - Memory mapped SDI Tx register write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ * @val: value to be written
+ *
+ * This function writes the value to the SDI Tx registers.
+ */
+static inline void xilinx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+/**
+ * xilinx_sdi_readl - Memory mapped SDI Tx register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ *
+ * Return: The contents of the SDI Tx register
+ *
+ * This function returns the contents of the corresponding SDI Tx register.
+ */
+static inline u32 xilinx_sdi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xilinx_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx AXI4S-to-Video core.
+ */
+static void xilinx_en_axi4s(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_AXI4S_CTRL_EN_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xilinx_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx bridge.
+ */
+static void xilinx_en_bridge(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_BRIDGE_CTRL_EN_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xilinx_sdi_set_default_drm_properties - Configure SDI DRM
+ * properties with their default values
+ * @sdi: SDI structure having the updated user parameters
+ */
+static void
+xilinx_sdi_set_default_drm_properties(struct xilinx_sdi *sdi)
+{
+ drm_object_property_set_value(&sdi->connector.base,
+ sdi->sdi_mode, 0);
+ drm_object_property_set_value(&sdi->connector.base,
+ sdi->sdi_data_strm, 0);
+ drm_object_property_set_value(&sdi->connector.base,
+ sdi->is_frac_prop, 0);
+}
+
+/**
+ * xilinx_sdi_irq_handler - SDI Tx interrupt
+ * @irq: irq number
+ * @data: irq data
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ * This handler services the GT reset-done and AXI4-Stream error interrupts.
+ */
+static irqreturn_t xilinx_sdi_irq_handler(int irq, void *data)
+{
+ struct xilinx_sdi *sdi = (struct xilinx_sdi *)data;
+ u32 reg;
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+ if (reg & XSDI_GTTX_RSTDONE_INTR_MASK)
+ dev_dbg(sdi->dev, "GT reset interrupt received\n");
+ if (reg & XSDI_TX_CE_ALIGN_ERR_INTR_MASK)
+ dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+ if (reg & XSDI_OVERFLOW_INTR_MASK)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+ if (reg & XSDI_UNDERFLOW_INTR_MASK)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+ xilinx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+ reg & ~(XSDI_AXI4S_VID_LOCK_INTR_MASK));
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+ if (reg & XSDI_TX_TDATA_GT_RESETDONE_MASK) {
+ sdi->event_received = true;
+ wake_up_interruptible(&sdi->wait_event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * This function sets the ST352 packet insertion line numbers.
+ */
+static void xilinx_sdi_set_payload_line(struct xilinx_sdi *sdi,
+ u32 line_1, u32 line_2)
+{
+ u32 data;
+
+ data = ((line_1 & XSDI_TX_ST352_LINE_MASK) |
+ ((line_2 & XSDI_TX_ST352_LINE_MASK) <<
+ XSDI_TX_ST352_LINE_F2_SHIFT));
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, data);
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data |= (1 << XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
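+
+/*
+ * Worked example (for illustration): for HD/3G/6G/12G modes the driver
+ * passes line_1 = 10 and line_2 = 572 (see xilinx_sdi_payload_config()),
+ * so the register value becomes
+ *
+ *	data = (10 & 0x7FF) | ((572 & 0x7FF) << 16) = 0x023C000A
+ *
+ * i.e. field 1 packets inserted on line 10 and field 2 packets on line 572.
+ */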
+
+/**
+ * xilinx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * This function sets the ST352 payload data for the corresponding stream.
+ */
+static void xilinx_sdi_set_payload_data(struct xilinx_sdi *sdi,
+ u32 data_strm, u32 payload)
+{
+ xilinx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_CH0 + (data_strm * 4)), payload);
+}
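+
+/*
+ * For illustration: data stream 3 lands at register offset
+ * XSDI_TX_ST352_DATA_CH0 + 3 * 4 = 0x1C + 0xC = 0x28; the per-stream
+ * payload registers are laid out contiguously after channel 0.
+ */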
+
+/**
+ * xilinx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and disables the core enable bit
+ * of the core configuration register.
+ */
+static void xilinx_sdi_set_display_disable(struct xilinx_sdi *sdi)
+{
+ u32 i;
+
+ for (i = 0; i < SDI_MAX_DATASTREAM; i++)
+ xilinx_sdi_set_payload_data(sdi, i, 0);
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xilinx_sdi_payload_config - config the SDI payload parameters
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * This function configures the SDI ST352 payload parameters.
+ */
+static void xilinx_sdi_payload_config(struct xilinx_sdi *sdi, u32 mode)
+{
+ u32 payload_1, payload_2;
+
+ switch (mode) {
+ case XSDI_MODE_SD:
+ payload_1 = PAYLD_LN1_SDPAL;
+ payload_2 = PAYLD_LN2_SDPAL;
+ break;
+ case XSDI_MODE_HD:
+ case XSDI_MODE_3GA:
+ case XSDI_MODE_3GB:
+ case XSDI_MODE_6G:
+ case XSDI_MODE_12G:
+ payload_1 = PAYLD_LN1_HD_3_6_12G;
+ payload_2 = PAYLD_LN2_HD_3_6_12G;
+ break;
+ default:
+ payload_1 = 0;
+ payload_2 = 0;
+ break;
+ }
+
+ xilinx_sdi_set_payload_line(sdi, payload_1, payload_2);
+}
+
+/**
+ * xilinx_set_sdi_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer 1 - fractional
+ * @mux_ptrn: specify the data stream interleaving pattern to be used
+ *
+ * This function sets the SDI mode, fractional rate flag and data stream
+ * interleaving pattern in the module control register.
+ */
+static void xilinx_set_sdi_mode(struct xilinx_sdi *sdi, u32 mode,
+ bool is_frac, u32 mux_ptrn)
+{
+ u32 data;
+
+ xilinx_sdi_payload_config(sdi, mode);
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data &= ~((XSDI_TX_CTRL_MODE_MASK << XSDI_TX_CTRL_MODE_SHIFT) |
+ (XSDI_TX_CTRL_M_MASK) | (XSDI_TX_CTRL_MUX_MASK
+ << XSDI_TX_CTRL_MUX_SHIFT));
+
+ data |= (((mode & XSDI_TX_CTRL_MODE_MASK)
+ << XSDI_TX_CTRL_MODE_SHIFT) |
+ (is_frac << XSDI_TX_CTRL_M_SHIFT) |
+ ((mux_ptrn & XSDI_TX_CTRL_MUX_MASK) << XSDI_TX_CTRL_MUX_SHIFT));
+
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
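+
+/*
+ * Worked example (for illustration): for mode = XSDI_MODE_3GA (2),
+ * is_frac = false and mux_ptrn = XSDI_TX_MUX_SD_HD_3GA (0), the code
+ * above clears bits [6:4], bit 7 and bits [10:8], then sets
+ *
+ *	data |= (2 << 4) | (0 << 7) | (0 << 8);
+ *
+ * leaving the mode field at 0x2 with integer frame rates selected.
+ */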
+
+/**
+ * xilinx_sdi_set_config_parameters - Configure SDI Tx registers with parameters
+ * given from user application.
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure having drm_property parameters
+ * configured from user application and writes them into SDI IP registers.
+ */
+static void xilinx_sdi_set_config_parameters(struct xilinx_sdi *sdi)
+{
+ u32 mode;
+ int mux_ptrn = INVALID_VALUE;
+ bool is_frac;
+
+ mode = sdi->sdi_mod_prop_val;
+ is_frac = sdi->is_frac_prop_val;
+
+ switch (mode) {
+ case XSDI_MODE_3GA:
+ mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+ break;
+ case XSDI_MODE_3GB:
+ mux_ptrn = XSDI_TX_MUX_3GB;
+ break;
+ case XSDI_MODE_6G:
+ if (sdi->sdi_data_strm_prop_val == 4)
+ mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+ else if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ case XSDI_MODE_12G:
+ if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ default:
+ mux_ptrn = 0;
+ break;
+ }
+ if (mux_ptrn == INVALID_VALUE) {
+ dev_err(sdi->dev, "%d data stream not supported for %d mode",
+ sdi->sdi_data_strm_prop_val, mode);
+ return;
+ }
+ xilinx_set_sdi_mode(sdi, mode, is_frac, mux_ptrn);
+}
+
+/**
+ * xilinx_sdi_connector_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @base_connector: pointer Xilinx SDI connector
+ * @property: pointer to the drm_property structure
+ * @value: SDI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from a user
+ * application and updates the SDI structure property variables with them.
+ * These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xilinx_sdi_connector_set_property(struct drm_connector *base_connector,
+ struct drm_property *property,
+ u64 value)
+{
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+
+ if (property == sdi->sdi_mode)
+ sdi->sdi_mod_prop_val = (unsigned int)value;
+ else if (property == sdi->sdi_data_strm)
+ sdi->sdi_data_strm_prop_val = (unsigned int)value;
+ else if (property == sdi->is_frac_prop)
+ sdi->is_frac_prop_val = !!value;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * xilinx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: index of the mode in xlnx_sdi_modes if found, -EINVAL otherwise
+ */
+static int xilinx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+ if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+ return i;
+ return -EINVAL;
+}
+
+/**
+ * xilinx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer Xilinx SDI connector
+ *
+ * Return: Count of modes added
+ *
+ * This function adds the supported SDI modes and returns their count.
+ */
+static int xilinx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+ int i, num_modes = 0;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ const struct drm_display_mode *ptr = &xlnx_sdi_modes[i].mode;
+
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+
+static int xilinx_sdi_connector_dpms(struct drm_connector *connector,
+ int mode)
+{
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xilinx_sdi_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void xilinx_sdi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xilinx_sdi_connector_funcs = {
+ .dpms = xilinx_sdi_connector_dpms,
+ .detect = xilinx_sdi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xilinx_sdi_connector_destroy,
+ .set_property = xilinx_sdi_connector_set_property,
+};
+
+static struct drm_encoder *
+xilinx_sdi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_sdi(connector)->encoder);
+}
+
+static int xilinx_sdi_get_modes(struct drm_connector *connector)
+{
+ return xilinx_sdi_drm_add_modes(connector);
+}
+
+static struct drm_connector_helper_funcs xilinx_sdi_connector_helper_funcs = {
+ .get_modes = xilinx_sdi_get_modes,
+ .best_encoder = xilinx_sdi_best_encoder,
+};
+
+/**
+ * xilinx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the Xilinx SDI connector component and creates
+ * the drm_property variables used to configure it.
+ */
+static void
+xilinx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 1, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+}
+
+/**
+ * xilinx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ */
+static void
+xilinx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xilinx_sdi *sdi = connector_to_sdi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (sdi->sdi_mode)
+ drm_object_attach_property(obj, sdi->sdi_mode, 0);
+
+ if (sdi->sdi_data_strm)
+ drm_object_attach_property(obj, sdi->sdi_data_strm, 0);
+
+ if (sdi->is_frac_prop)
+ drm_object_attach_property(obj, sdi->is_frac_prop, 0);
+}
+
+static int xilinx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xilinx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xilinx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xilinx_sdi_drm_connector_create_property(connector);
+ xilinx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+static bool xilinx_sdi_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/**
+ * xilinx_sdi_set_display_enable - Enables the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and enables the core enable bit
+ * of the core configuration register.
+ */
+static void xilinx_sdi_set_display_enable(struct xilinx_sdi *sdi)
+{
+ u32 data;
+
+ data = xilinx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN_MASK;
+ /* start sdi stream */
+ xilinx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+static void xilinx_sdi_encoder_dpms(struct drm_encoder *encoder,
+ int mode)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+
+ dev_dbg(sdi->dev, "encoder dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ xilinx_sdi_set_display_enable(sdi);
+ return;
+ default:
+ xilinx_sdi_set_display_disable(sdi);
+ xilinx_sdi_set_default_drm_properties(sdi);
+ }
+}
+
+/**
+ * xilinx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the st352 payload to be configured.
+ * Please refer to SMPTE ST352 documents for it.
+ * Return: return st352 payload
+ */
+static u32 xilinx_sdi_calc_st352_payld(struct xilinx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+ u32 id, sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ id = xilinx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if ((sdi_mode == XSDI_MODE_3GB) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC_MASK;
+ if (is_p && (mode->vtotal >= 1125))
+ byt2 |= XST352_PROG_TRANS_MASK;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
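+
+/*
+ * Worked example (for illustration): for the 3G 1920x1080@60 progressive
+ * entry with sdi_mode = XSDI_MODE_3GA and integer frame rate:
+ *
+ *	byt1 = st352_byt1[XSDI_MODE_3GA] = 0x89
+ *	byt2 = st352_byt2[0] = 0x0B, then OR-ed with XST352_PROG_PIC_MASK
+ *	       (progressive picture) and XST352_PROG_TRANS_MASK
+ *	       (vtotal >= 1125), giving 0xCB
+ *	byt3 = ST352_BYTE3 = 0x00 (hdisplay is neither 2048 nor 4096)
+ *
+ *	payload = 0x01 << 24 | 0x00 << 16 | 0xCB << 8 | 0x89 = 0x0100CB89
+ */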
+
+/**
+ * xilinx_sdi_mode_set - drive the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @mode: DRM kernel-internal display mode structure
+ * @adjusted_mode: SDI panel timing parameters
+ *
+ * This function derives the video timing parameters from the adjusted
+ * mode and configures the VTC with them.
+ */
+static void xilinx_sdi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ struct videomode vm;
+ u32 payload, i;
+
+ xilinx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xilinx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ payload |= (i << 1) << XSDI_CH_SHIFT;
+ xilinx_sdi_set_payload_data(sdi, i, payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ xilinx_vtc_config_sig(sdi->vtc, &vm);
+}
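+
+/*
+ * Worked example (for illustration): for the 3G 1920x1080@60 entry
+ * (hdisplay 1920, hsync 2008-2052, htotal 2200), dividing the horizontal
+ * timings by PIXELS_PER_CLK (2) yields
+ *
+ *	vm.hactive      = 1920 / 2          = 960
+ *	vm.hfront_porch = (2008 - 1920) / 2 = 44
+ *	vm.hsync_len    = (2052 - 2008) / 2 = 22
+ *	vm.hback_porch  = (2200 - 2052) / 2 = 74
+ *
+ * while the vertical timings are programmed unscaled.
+ */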
+
+static void xilinx_sdi_prepare(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xilinx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC_MASK | XSDI_TX_CTRL_INS_ST352_MASK |
+ XSDI_TX_CTRL_OVR_ST352_MASK | XSDI_TX_CTRL_INS_SYNC_BIT_MASK |
+ XSDI_TX_CTRL_INS_EDH_MASK;
+ xilinx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xilinx_vtc_reset(sdi->vtc);
+}
+
+static void xilinx_sdi_commit(struct drm_encoder *encoder)
+{
+ struct xilinx_sdi *sdi = encoder_to_sdi(encoder);
+ long ret;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+ xilinx_sdi_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ ret = wait_event_interruptible_timeout(sdi->wait_event,
+ sdi->event_received,
+ usecs_to_jiffies(GT_TIMEOUT));
+ if (ret <= 0) {
+ dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+ return;
+ }
+ sdi->event_received = false;
+ /* enable sdi bridge, vtc and Axi4s_vid_out_ctrl */
+ xilinx_en_bridge(sdi);
+ xilinx_vtc_enable(sdi->vtc);
+ xilinx_en_axi4s(sdi);
+}
+
+static const struct drm_encoder_helper_funcs xilinx_sdi_encoder_helper_funcs = {
+ .dpms = xilinx_sdi_encoder_dpms,
+ .mode_fixup = xilinx_sdi_mode_fixup,
+ .mode_set = xilinx_sdi_mode_set,
+ .prepare = xilinx_sdi_prepare,
+ .commit = xilinx_sdi_commit,
+};
+
+static const struct drm_encoder_funcs xilinx_sdi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xilinx_sdi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_sdi *sdi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &sdi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: Only one possible CRTC is supported for now, as per the current
+ * implementation of the SDI Tx driver. The DRM framework can support
+ * more than one CRTC, and the driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+
+ drm_encoder_init(drm_dev, encoder, &xilinx_sdi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ drm_encoder_helper_add(encoder, &xilinx_sdi_encoder_helper_funcs);
+
+ ret = xilinx_sdi_create_connector(encoder);
+ if (ret) {
+ dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ }
+ return ret;
+}
+
+static void xilinx_sdi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xilinx_sdi *sdi = dev_get_drvdata(dev);
+
+ xilinx_sdi_encoder_dpms(&sdi->encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_cleanup(&sdi->encoder);
+ drm_connector_cleanup(&sdi->connector);
+}
+
+static const struct component_ops xilinx_sdi_component_ops = {
+ .bind = xilinx_sdi_bind,
+ .unbind = xilinx_sdi_unbind,
+};
+
+static irqreturn_t xilinx_sdi_vblank_handler(int irq, void *data)
+{
+ struct xilinx_sdi *sdi = (struct xilinx_sdi *)data;
+ u32 intr = xilinx_vtc_intr_get(sdi->vtc);
+
+ if (!intr)
+ return IRQ_NONE;
+
+ if (sdi->vblank_fn)
+ sdi->vblank_fn(sdi->vblank_data);
+
+ xilinx_vtc_intr_clear(sdi->vtc, intr);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_drm_sdi_enable_vblank - Enable the vblank handling
+ * @sdi: SDI subsystem
+ * @vblank_fn: callback to be called on vblank event
+ * @vblank_data: data to be used in @vblank_fn
+ *
+ * This function registers the vblank handler; the handler is then invoked
+ * on each subsequent vblank event.
+ */
+void xilinx_drm_sdi_enable_vblank(struct xilinx_sdi *sdi,
+ void (*vblank_fn)(void *),
+ void *vblank_data)
+{
+ sdi->vblank_fn = vblank_fn;
+ sdi->vblank_data = vblank_data;
+ xilinx_vtc_vblank_enable(sdi->vtc);
+}
+EXPORT_SYMBOL_GPL(xilinx_drm_sdi_enable_vblank);
+
+/**
+ * xilinx_drm_sdi_disable_vblank - Disable the vblank handling
+ * @sdi: SDI subsystem
+ *
+ * Disable the vblank handler. The vblank handler and data are unregistered.
+ */
+void xilinx_drm_sdi_disable_vblank(struct xilinx_sdi *sdi)
+{
+ sdi->vblank_fn = NULL;
+ sdi->vblank_data = NULL;
+ xilinx_vtc_vblank_disable(sdi->vtc);
+}
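+
+/*
+ * Example (illustrative, names assumed): a CRTC driver would typically
+ * register a callback that forwards the event to the DRM core, with the
+ * struct drm_crtc pointer as the callback data:
+ *
+ *	static void xilinx_drm_crtc_vblank(void *data)
+ *	{
+ *		drm_crtc_handle_vblank((struct drm_crtc *)data);
+ *	}
+ *
+ *	xilinx_drm_sdi_enable_vblank(sdi, xilinx_drm_crtc_vblank, crtc);
+ */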
+
+/**
+ * xilinx_sdi_register_device - Register the SDI subsystem to the global list
+ * @sdi: SDI subsystem
+ *
+ * Register the SDI subsystem instance to the global list
+ */
+static void xilinx_sdi_register_device(struct xilinx_sdi *sdi)
+{
+ mutex_lock(&xilinx_sdi_lock);
+ list_add_tail(&sdi->list, &xilinx_sdi_list);
+ mutex_unlock(&xilinx_sdi_lock);
+}
+
+/**
+ * xilinx_drm_sdi_of_get - Get the SDI subsystem instance
+ * @np: parent device node
+ *
+ * This function searches for and returns the SDI subsystem structure for
+ * the parent device node, @np. @np should have an 'xlnx,sdi' property
+ * pointing to the SDI device node. An instance can be shared by multiple
+ * users.
+ *
+ * Return: corresponding SDI subsystem structure if found. NULL if
+ * the device node doesn't have an 'xlnx,sdi' property, or -EPROBE_DEFER
+ * error pointer if the SDI subsystem isn't found.
+ */
+struct xilinx_sdi *xilinx_drm_sdi_of_get(struct device_node *np)
+{
+ struct xilinx_sdi *found = NULL;
+ struct xilinx_sdi *sdi;
+ struct device_node *sdi_node;
+
+ if (!of_find_property(np, "xlnx,sdi", NULL))
+ return NULL;
+
+ sdi_node = of_parse_phandle(np, "xlnx,sdi", 0);
+ if (!sdi_node)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&xilinx_sdi_lock);
+ list_for_each_entry(sdi, &xilinx_sdi_list, list) {
+ if (sdi->dev->of_node == sdi_node) {
+ found = sdi;
+ break;
+ }
+ }
+ mutex_unlock(&xilinx_sdi_lock);
+
+ of_node_put(sdi_node);
+ if (!found)
+ return ERR_PTR(-EPROBE_DEFER);
+ return found;
+}
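+
+/*
+ * Example (illustrative): a user such as an encoder or CRTC driver would
+ * typically resolve the SDI instance in its probe path and propagate
+ * -EPROBE_DEFER until the SDI driver has registered itself:
+ *
+ *	sdi = xilinx_drm_sdi_of_get(pdev->dev.of_node);
+ *	if (IS_ERR(sdi))
+ *		return PTR_ERR(sdi);
+ *	if (!sdi)
+ *		dev_dbg(dev, "no xlnx,sdi property, continuing without SDI");
+ */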
+
+/**
+ * xilinx_sdi_unregister_device - Unregister the SDI subsystem instance
+ * @sdi: SDI subsystem
+ *
+ * Unregister the SDI subsystem instance from the global list
+ */
+static void xilinx_sdi_unregister_device(struct xilinx_sdi *sdi)
+{
+ mutex_lock(&xilinx_sdi_lock);
+ list_del(&sdi->list);
+ mutex_unlock(&xilinx_sdi_lock);
+}
+
+static int xilinx_sdi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_sdi *sdi;
+ struct device_node *vtc_node;
+ int ret, irq;
+
+ sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
+ sdi->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdi->base = devm_ioremap_resource(dev, res);
+
+ if (IS_ERR(sdi->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(sdi->base);
+ }
+ platform_set_drvdata(pdev, sdi);
+
+ vtc_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vtc", 0);
+ if (!vtc_node) {
+ dev_err(dev, "vtc node not present\n");
+ return -ENODEV;
+ }
+ sdi->vtc = xilinx_vtc_probe(sdi->dev, vtc_node);
+ of_node_put(vtc_node);
+ if (IS_ERR(sdi->vtc)) {
+ dev_err(dev, "failed to probe VTC\n");
+ return PTR_ERR(sdi->vtc);
+ }
+
+ /* disable interrupt */
+ xilinx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xilinx_sdi_irq_handler, IRQF_ONESHOT,
+ dev_name(sdi->dev), sdi);
+ if (ret < 0)
+ return ret;
+
+ irq = platform_get_irq(pdev, 1); /* vblank interrupt */
+ if (irq < 0)
+ return irq;
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xilinx_sdi_vblank_handler, IRQF_ONESHOT,
+ "sdiTx-vblank", sdi);
+ if (ret < 0)
+ return ret;
+
+ init_waitqueue_head(&sdi->wait_event);
+ sdi->event_received = false;
+
+ xilinx_sdi_register_device(sdi);
+ return component_add(dev, &xilinx_sdi_component_ops);
+}
+
+static int xilinx_sdi_remove(struct platform_device *pdev)
+{
+ struct xilinx_sdi *sdi = platform_get_drvdata(pdev);
+
+ xilinx_sdi_unregister_device(sdi);
+ component_del(&pdev->dev, &xilinx_sdi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_sdi_of_match[] = {
+ { .compatible = "xlnx,v-smpte-uhdsdi-tx-ss"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_sdi_of_match);
+
+static struct platform_driver sdi_tx_driver = {
+ .probe = xilinx_sdi_probe,
+ .remove = xilinx_sdi_remove,
+ .driver = {
+ .name = "xlnx,uhdsdi-tx",
+ .of_match_table = xilinx_sdi_of_match,
+ },
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
diff --git a/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h
new file mode 100644
index 000000000000..b9a773eef094
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_drm_sdi.h
@@ -0,0 +1,29 @@
+/*
+ * SDI subsystem header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_DRM_SDI_H_
+#define _XILINX_DRM_SDI_H_
+
+struct xilinx_sdi;
+struct device_node;
+
+struct xilinx_sdi *xilinx_drm_sdi_of_get(struct device_node *np);
+void xilinx_drm_sdi_enable_vblank(struct xilinx_sdi *sdi,
+ void (*vblank_fn)(void *),
+ void *vblank_data);
+void xilinx_drm_sdi_disable_vblank(struct xilinx_sdi *sdi);
+#endif /* _XILINX_DRM_SDI_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_osd.c b/drivers/gpu/drm/xilinx/xilinx_osd.c
new file mode 100644
index 000000000000..b777fbbed5b8
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_osd.c
@@ -0,0 +1,382 @@
+/*
+ * Xilinx OSD support
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_osd.h"
+
+/* registers */
+#define OSD_CTL 0x000 /* control */
+#define OSD_SS 0x020 /* screen size */
+#define OSD_ENC 0x028 /* encoding register */
+#define OSD_BC0 0x100 /* background color channel 0 */
+#define OSD_BC1 0x104 /* background color channel 1 */
+#define OSD_BC2 0x108 /* background color channel 2 */
+
+#define OSD_L0C 0x110 /* layer 0 control */
+
+/* register offset of layers */
+#define OSD_LAYER_SIZE 0x10
+#define OSD_LXC 0x00 /* layer control */
+#define OSD_LXP 0x04 /* layer position */
+#define OSD_LXS 0x08 /* layer size */
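+/* e.g. layer 2's control register sits at OSD_L0C + 2 * OSD_LAYER_SIZE = 0x130 */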
+
+/* osd control register bit definition */
+#define OSD_CTL_RUE (1 << 1) /* osd reg update enable */
+#define OSD_CTL_EN (1 << 0) /* osd enable */
+
+/* osd screen size register bit definition */
+#define OSD_SS_YSIZE_MASK 0x0fff0000 /* vertical height of OSD output */
+#define OSD_SS_YSIZE_SHIFT 16 /* bit shift of OSD_SS_YSIZE_MASK */
+#define OSD_SS_XSIZE_MASK 0x00000fff /* horizontal width of OSD output */
+
+/* osd video format mask */
+#define OSD_VIDEO_FORMAT_MASK 0x0000000f /* video format */
+
+/* osd background color channel 0 */
+#define OSD_BC0_YG_MASK 0x000000ff /* Y (luma) or Green */
+
+/* osd background color channel 1 */
+#define OSD_BC1_UCBB_MASK 0x000000ff /* U (Cb) or Blue */
+
+/* osd background color channel 2 */
+#define OSD_BC2_VCRR_MASK 0x000000ff /* V(Cr) or Red */
+
+/* maximum number of the layers */
+#define OSD_MAX_NUM_OF_LAYERS 8
+
+/* osd layer control (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXC_ALPHA_MASK 0x0fff0000 /* global alpha value */
+#define OSD_LXC_ALPHA_SHIFT 16 /* bit shift of alpha value */
+#define OSD_LXC_PRIORITY_MASK 0x00000700 /* layer priority */
+#define OSD_LXC_PRIORITY_SHIFT 8 /* bit shift of priority */
+#define OSD_LXC_GALPHAEN (1 << 1) /* global alpha enable */
+#define OSD_LXC_EN (1 << 0) /* layer enable */
+
+/* osd layer position (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXP_YSTART_MASK 0x0fff0000 /* vert start line */
+#define OSD_LXP_YSTART_SHIFT 16 /* vert start line bit shift */
+#define OSD_LXP_XSTART_MASK 0x00000fff /* horizontal start pixel */
+
+/* osd layer size (layer 0 through (OSD_MAX_NUM_OF_LAYERS - 1)) */
+#define OSD_LXS_YSIZE_MASK 0x0fff0000 /* vert size */
+#define OSD_LXS_YSIZE_SHIFT 16 /* vertical size bit shift */
+#define OSD_LXS_XSIZE_MASK 0x00000fff /* horizontal size of layer */
+
+/* osd software reset */
+#define OSD_RST_RESET (1 << 31)
+
+/**
+ * struct xilinx_osd_layer - Xilinx OSD layer object
+ *
+ * @base: base address
+ * @id: id
+ * @avail: available flag
+ * @osd: osd
+ */
+struct xilinx_osd_layer {
+ void __iomem *base;
+ int id;
+ bool avail;
+ struct xilinx_osd *osd;
+};
+
+/**
+ * struct xilinx_osd - Xilinx OSD object
+ *
+ * @base: base address
+ * @layers: layers
+ * @num_layers: number of layers
+ * @max_width: maximum width
+ * @format: video format
+ */
+struct xilinx_osd {
+ void __iomem *base;
+ struct xilinx_osd_layer *layers[OSD_MAX_NUM_OF_LAYERS];
+ unsigned int num_layers;
+ unsigned int max_width;
+ unsigned int format;
+};
+
+/* osd layer operation */
+/* set layer alpha */
+void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 alpha)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("alpha: 0x%08x\n", alpha);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_ALPHA_MASK;
+ value |= (alpha << OSD_LXC_ALPHA_SHIFT) & OSD_LXC_ALPHA_MASK;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+void xilinx_osd_layer_enable_alpha(struct xilinx_osd_layer *layer, bool enable)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("enable: %d\n", enable);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value = enable ? (value | OSD_LXC_GALPHAEN) :
+ (value & ~OSD_LXC_GALPHAEN);
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* set layer priority */
+void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("prio: %d\n", prio);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_PRIORITY_MASK;
+ value |= (prio << OSD_LXC_PRIORITY_SHIFT) & OSD_LXC_PRIORITY_MASK;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* set layer dimension */
+void xilinx_osd_layer_set_dimension(struct xilinx_osd_layer *layer,
+ u16 xstart, u16 ystart,
+ u16 xsize, u16 ysize)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+ DRM_DEBUG_DRIVER("w: %d(%d), h: %d(%d)\n",
+ xsize, xstart, ysize, ystart);
+
+ value = xstart & OSD_LXP_XSTART_MASK;
+ value |= (ystart << OSD_LXP_YSTART_SHIFT) & OSD_LXP_YSTART_MASK;
+
+ xilinx_drm_writel(layer->base, OSD_LXP, value);
+
+ value = xsize & OSD_LXS_XSIZE_MASK;
+ value |= (ysize << OSD_LXS_YSIZE_SHIFT) & OSD_LXS_YSIZE_MASK;
+
+ xilinx_drm_writel(layer->base, OSD_LXS, value);
+}
+
+/* enable layer */
+void xilinx_osd_layer_enable(struct xilinx_osd_layer *layer)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value |= OSD_LXC_EN;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* disable layer */
+void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
+
+ value = xilinx_drm_readl(layer->base, OSD_LXC);
+ value &= ~OSD_LXC_EN;
+ xilinx_drm_writel(layer->base, OSD_LXC, value);
+}
+
+/* get an available layer */
+struct xilinx_osd_layer *xilinx_osd_layer_get(struct xilinx_osd *osd)
+{
+ struct xilinx_osd_layer *layer = NULL;
+ int i;
+
+ for (i = 0; i < osd->num_layers; i++) {
+ if (osd->layers[i]->avail) {
+ layer = osd->layers[i];
+ layer->avail = false;
+ break;
+ }
+ }
+
+ if (!layer)
+ return ERR_PTR(-ENODEV);
+
+ DRM_DEBUG_DRIVER("layer id: %d\n", i);
+
+ return layer;
+}
+
+/* put a layer */
+void xilinx_osd_layer_put(struct xilinx_osd_layer *layer)
+{
+ layer->avail = true;
+}
+
+/* osd operations */
+/* set osd color */
+void xilinx_osd_set_color(struct xilinx_osd *osd, u8 r, u8 g, u8 b)
+{
+ u32 value;
+
+ value = g;
+ xilinx_drm_writel(osd->base, OSD_BC0, value);
+ value = b;
+ xilinx_drm_writel(osd->base, OSD_BC1, value);
+ value = r;
+ xilinx_drm_writel(osd->base, OSD_BC2, value);
+}
+
+/* set osd dimension */
+void xilinx_osd_set_dimension(struct xilinx_osd *osd, u32 width, u32 height)
+{
+ u32 value;
+
+ DRM_DEBUG_DRIVER("w: %d, h: %d\n", width, height);
+
+ value = width | ((height << OSD_SS_YSIZE_SHIFT) & OSD_SS_YSIZE_MASK);
+ xilinx_drm_writel(osd->base, OSD_SS, value);
+}
+
+/* get osd number of layers */
+unsigned int xilinx_osd_get_num_layers(struct xilinx_osd *osd)
+{
+ return osd->num_layers;
+}
+
+/* get osd max width */
+unsigned int xilinx_osd_get_max_width(struct xilinx_osd *osd)
+{
+ return osd->max_width;
+}
+
+/* get osd color format */
+unsigned int xilinx_osd_get_format(struct xilinx_osd *osd)
+{
+ return osd->format;
+}
+
+/* reset osd */
+void xilinx_osd_reset(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL, OSD_RST_RESET);
+}
+
+/* enable osd */
+void xilinx_osd_enable(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_EN);
+}
+
+/* disable osd */
+void xilinx_osd_disable(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_EN);
+}
+
+/* register-update-enable osd */
+void xilinx_osd_enable_rue(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_RUE);
+}
+
+/* register-update-disable osd */
+void xilinx_osd_disable_rue(struct xilinx_osd *osd)
+{
+ xilinx_drm_writel(osd->base, OSD_CTL,
+ xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_RUE);
+}
+
+static const struct of_device_id xilinx_osd_of_match[] = {
+ { .compatible = "xlnx,v-osd-5.01.a" },
+ { /* end of table */ },
+};
+
+struct xilinx_osd *xilinx_osd_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_osd *osd;
+ struct xilinx_osd_layer *layer;
+ const struct of_device_id *match;
+ struct resource res;
+ int i;
+ int ret;
+
+ match = of_match_node(xilinx_osd_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ osd = devm_kzalloc(dev, sizeof(*osd), GFP_KERNEL);
+ if (!osd)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ osd->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(osd->base))
+ return ERR_CAST(osd->base);
+
+ ret = of_property_read_u32(node, "xlnx,num-layers", &osd->num_layers);
+ if (ret) {
+ dev_warn(dev, "failed to get num of layers prop\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,screen-width", &osd->max_width);
+ if (ret) {
+ dev_warn(dev, "failed to get screen width prop\n");
+ return ERR_PTR(ret);
+ }
+
+ /* read the video format set by a user */
+ osd->format = xilinx_drm_readl(osd->base, OSD_ENC) &
+ OSD_VIDEO_FORMAT_MASK;
+
+ for (i = 0; i < osd->num_layers; i++) {
+ layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
+ layer->base = osd->base + OSD_L0C + OSD_LAYER_SIZE * i;
+ layer->id = i;
+ layer->osd = osd;
+ layer->avail = true;
+ osd->layers[i] = layer;
+ }
+
+ return osd;
+}
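+
+/*
+ * For illustration, an OSD node carrying the properties read above might
+ * look like the following (hypothetical fragment; the address is an
+ * example):
+ *
+ *	osd@43c00000 {
+ *		compatible = "xlnx,v-osd-5.01.a";
+ *		reg = <0x43c00000 0x10000>;
+ *		xlnx,num-layers = <2>;
+ *		xlnx,screen-width = <1920>;
+ *	};
+ */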
diff --git a/drivers/gpu/drm/xilinx/xilinx_osd.h b/drivers/gpu/drm/xilinx/xilinx_osd.h
new file mode 100644
index 000000000000..d84ee9117419
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_osd.h
@@ -0,0 +1,62 @@
+/*
+ * Xilinx OSD Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_OSD_H_
+#define _XILINX_OSD_H_
+
+/* TODO: use the fixed max alpha value for 8 bit component width for now. */
+#define OSD_MAX_ALPHA 0x100
+
+struct xilinx_osd;
+struct xilinx_osd_layer;
+
+/* osd layer configuration */
+void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 alpha);
+void xilinx_osd_layer_enable_alpha(struct xilinx_osd_layer *layer, bool enable);
+void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio);
+void xilinx_osd_layer_set_dimension(struct xilinx_osd_layer *layer,
+ u16 xstart, u16 ystart,
+ u16 xsize, u16 ysize);
+
+/* osd layer operation */
+void xilinx_osd_layer_enable(struct xilinx_osd_layer *layer);
+void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer);
+struct xilinx_osd_layer *xilinx_osd_layer_get(struct xilinx_osd *osd);
+void xilinx_osd_layer_put(struct xilinx_osd_layer *layer);
+
+/* osd configuration */
+void xilinx_osd_set_color(struct xilinx_osd *osd, u8 r, u8 g, u8 b);
+void xilinx_osd_set_dimension(struct xilinx_osd *osd, u32 width, u32 height);
+
+unsigned int xilinx_osd_get_num_layers(struct xilinx_osd *osd);
+unsigned int xilinx_osd_get_max_width(struct xilinx_osd *osd);
+unsigned int xilinx_osd_get_format(struct xilinx_osd *osd);
+
+/* osd operation */
+void xilinx_osd_reset(struct xilinx_osd *osd);
+void xilinx_osd_enable(struct xilinx_osd *osd);
+void xilinx_osd_disable(struct xilinx_osd *osd);
+void xilinx_osd_enable_rue(struct xilinx_osd *osd);
+void xilinx_osd_disable_rue(struct xilinx_osd *osd);
+
+struct device;
+struct device_node;
+
+struct xilinx_osd *xilinx_osd_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_OSD_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c
new file mode 100644
index 000000000000..2d3400456cb0
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.c
@@ -0,0 +1,119 @@
+/*
+ * Xilinx rgb to yuv converter support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "xilinx_drm_drv.h"
+
+#include "xilinx_rgb2yuv.h"
+
+/* registers */
+/* control register */
+#define RGB_CONTROL 0x000
+/* active size v,h */
+#define RGB_ACTIVE_SIZE 0x020
+
+/* control register bit definition */
+#define RGB_CTL_EN (1 << 0) /* enable */
+#define RGB_CTL_RUE (1 << 1) /* register update enable */
+#define RGB_RST_RESET (1 << 31) /* instant reset */
+
+struct xilinx_rgb2yuv {
+ void __iomem *base;
+};
+
+/* enable rgb2yuv */
+void xilinx_rgb2yuv_enable(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg | RGB_CTL_EN);
+}
+
+/* disable rgb2yuv */
+void xilinx_rgb2yuv_disable(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg & ~RGB_CTL_EN);
+}
+
+/* configure rgb2yuv */
+void xilinx_rgb2yuv_configure(struct xilinx_rgb2yuv *rgb2yuv,
+ int hactive, int vactive)
+{
+ xilinx_drm_writel(rgb2yuv->base, RGB_ACTIVE_SIZE,
+ (vactive << 16) | hactive);
+}
+
+/* reset rgb2yuv */
+void xilinx_rgb2yuv_reset(struct xilinx_rgb2yuv *rgb2yuv)
+{
+ u32 reg;
+
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, RGB_RST_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(rgb2yuv->base, RGB_CONTROL);
+ xilinx_drm_writel(rgb2yuv->base, RGB_CONTROL, reg | RGB_CTL_RUE);
+}
+
+static const struct of_device_id xilinx_rgb2yuv_of_match[] = {
+ { .compatible = "xlnx,v-rgb2ycrcb-6.01.a" },
+ { /* end of table */ },
+};
+
+/* probe rgb2yuv */
+struct xilinx_rgb2yuv *xilinx_rgb2yuv_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_rgb2yuv *rgb2yuv;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_rgb2yuv_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ rgb2yuv = devm_kzalloc(dev, sizeof(*rgb2yuv), GFP_KERNEL);
+ if (!rgb2yuv)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ rgb2yuv->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(rgb2yuv->base))
+ return ERR_CAST(rgb2yuv->base);
+
+ xilinx_rgb2yuv_reset(rgb2yuv);
+
+ return rgb2yuv;
+}
diff --git a/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h
new file mode 100644
index 000000000000..d1e544ac336b
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_rgb2yuv.h
@@ -0,0 +1,35 @@
+/*
+ * Color Space Converter Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_RGB2YUV_H_
+#define _XILINX_RGB2YUV_H_
+
+struct xilinx_rgb2yuv;
+
+void xilinx_rgb2yuv_configure(struct xilinx_rgb2yuv *rgb2yuv,
+ int hactive, int vactive);
+void xilinx_rgb2yuv_reset(struct xilinx_rgb2yuv *rgb2yuv);
+void xilinx_rgb2yuv_enable(struct xilinx_rgb2yuv *rgb2yuv);
+void xilinx_rgb2yuv_disable(struct xilinx_rgb2yuv *rgb2yuv);
+
+struct device;
+struct device_node;
+
+struct xilinx_rgb2yuv *xilinx_rgb2yuv_probe(struct device *dev,
+ struct device_node *node);
+
+#endif /* _XILINX_RGB2YUV_H_ */
diff --git a/drivers/gpu/drm/xilinx/xilinx_vtc.c b/drivers/gpu/drm/xilinx/xilinx_vtc.c
new file mode 100644
index 000000000000..67a7e4fa14bf
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_vtc.c
@@ -0,0 +1,645 @@
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#include <video/videomode.h>
+
+#include "xilinx_drm_drv.h"
+#include "xilinx_vtc.h"
+
+/* register offsets */
+#define VTC_CTL 0x000 /* control */
+#define VTC_STATS 0x004 /* status */
+#define VTC_ERROR 0x008 /* error */
+
+#define VTC_GASIZE 0x060 /* generator active size */
+#define VTC_GENC 0x068 /* generator encoding */
+#define VTC_GPOL 0x06c /* generator polarity */
+#define VTC_GHSIZE 0x070 /* generator frame horizontal size */
+#define VTC_GVSIZE 0x074 /* generator frame vertical size */
+#define VTC_GHSYNC 0x078 /* generator horizontal sync */
+#define VTC_GVBHOFF_F0 0x07c /* generator Field 0 vblank horizontal offset */
+#define VTC_GVSYNC_F0 0x080 /* generator Field 0 vertical sync */
+#define VTC_GVSHOFF_F0 0x084 /* generator Field 0 vsync horizontal offset */
+#define VTC_GVBHOFF_F1 0x088 /* generator Field 1 vblank horizontal offset */
+#define VTC_GVSYNC_F1 0x08C /* generator Field 1 vertical sync */
+#define VTC_GVSHOFF_F1 0x090 /* generator Field 1 vsync horizontal offset */
+
+#define VTC_RESET 0x000 /* reset register */
+#define VTC_ISR 0x004 /* interrupt status register */
+#define VTC_IER 0x00c /* interrupt enable register */
+
+/* control register bit */
+#define VTC_CTL_FIP (1 << 6) /* field id output polarity */
+#define VTC_CTL_ACP (1 << 5) /* active chroma output polarity */
+#define VTC_CTL_AVP (1 << 4) /* active video output polarity */
+#define VTC_CTL_HSP (1 << 3) /* hori sync output polarity */
+#define VTC_CTL_VSP (1 << 2) /* vert sync output polarity */
+#define VTC_CTL_HBP (1 << 1) /* hori blank output polarity */
+#define VTC_CTL_VBP (1 << 0) /* vert blank output polarity */
+
+#define VTC_CTL_FIPSS (1 << 26) /* field id output polarity source */
+#define VTC_CTL_ACPSS (1 << 25) /* active chroma out polarity source */
+#define VTC_CTL_AVPSS (1 << 24) /* active video out polarity source */
+#define VTC_CTL_HSPSS (1 << 23) /* hori sync out polarity source */
+#define VTC_CTL_VSPSS (1 << 22) /* vert sync out polarity source */
+#define VTC_CTL_HBPSS (1 << 21) /* hori blank out polarity source */
+#define VTC_CTL_VBPSS (1 << 20) /* vert blank out polarity source */
+
+#define VTC_CTL_VCSS (1 << 18) /* chroma source select */
+#define VTC_CTL_VASS (1 << 17) /* vertical offset source select */
+#define VTC_CTL_VBSS (1 << 16) /* vertical sync end source select */
+#define VTC_CTL_VSSS (1 << 15) /* vertical sync start source select */
+#define VTC_CTL_VFSS (1 << 14) /* vertical active size source select */
+#define VTC_CTL_VTSS (1 << 13) /* vertical frame size source select */
+
+#define VTC_CTL_HBSS (1 << 11) /* horiz sync end source select */
+#define VTC_CTL_HSSS (1 << 10) /* horiz sync start source select */
+#define VTC_CTL_HFSS (1 << 9) /* horiz active size source select */
+#define VTC_CTL_HTSS (1 << 8) /* horiz frame size source select */
+
+#define VTC_CTL_GE (1 << 2) /* vtc generator enable */
+#define VTC_CTL_RU (1 << 1) /* vtc register update */
+
+/* vtc generator horizontal 1 */
+#define VTC_GH1_BPSTART_MASK 0x1fff0000 /* horiz back porch start */
+#define VTC_GH1_BPSTART_SHIFT 16
+#define VTC_GH1_SYNCSTART_MASK 0x00001fff
+
+/* vtc generator vertical 1 (field 0) */
+#define VTC_GV1_BPSTART_MASK 0x1fff0000 /* vertical back porch start */
+#define VTC_GV1_BPSTART_SHIFT 16
+#define VTC_GV1_SYNCSTART_MASK 0x00001fff
+
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define VTC_XVXHOX_HEND_MASK 0x1fff0000 /* horiz offset end */
+#define VTC_XVXHOX_HEND_SHIFT 16 /* horiz offset end shift */
+#define VTC_XVXHOX_HSTART_MASK 0x00001fff /* horiz offset start */
+
+/* reset register bit definition */
+#define VTC_RESET_RESET (1 << 31) /* Software Reset */
+
+/* interrupt status/enable register bit definition */
+#define VTC_IXR_FSYNC15 (1 << 31) /* frame sync interrupt 15 */
+#define VTC_IXR_FSYNC14 (1 << 30) /* frame sync interrupt 14 */
+#define VTC_IXR_FSYNC13 (1 << 29) /* frame sync interrupt 13 */
+#define VTC_IXR_FSYNC12 (1 << 28) /* frame sync interrupt 12 */
+#define VTC_IXR_FSYNC11 (1 << 27) /* frame sync interrupt 11 */
+#define VTC_IXR_FSYNC10 (1 << 26) /* frame sync interrupt 10 */
+#define VTC_IXR_FSYNC09 (1 << 25) /* frame sync interrupt 09 */
+#define VTC_IXR_FSYNC08 (1 << 24) /* frame sync interrupt 08 */
+#define VTC_IXR_FSYNC07 (1 << 23) /* frame sync interrupt 07 */
+#define VTC_IXR_FSYNC06 (1 << 22) /* frame sync interrupt 06 */
+#define VTC_IXR_FSYNC05 (1 << 21) /* frame sync interrupt 05 */
+#define VTC_IXR_FSYNC04 (1 << 20) /* frame sync interrupt 04 */
+#define VTC_IXR_FSYNC03 (1 << 19) /* frame sync interrupt 03 */
+#define VTC_IXR_FSYNC02 (1 << 18) /* frame sync interrupt 02 */
+#define VTC_IXR_FSYNC01 (1 << 17) /* frame sync interrupt 01 */
+#define VTC_IXR_FSYNC00 (1 << 16) /* frame sync interrupt 00 */
+#define VTC_IXR_FSYNCALL_MASK (VTC_IXR_FSYNC00 | \
+ VTC_IXR_FSYNC01 | \
+ VTC_IXR_FSYNC02 | \
+ VTC_IXR_FSYNC03 | \
+ VTC_IXR_FSYNC04 | \
+ VTC_IXR_FSYNC05 | \
+ VTC_IXR_FSYNC06 | \
+ VTC_IXR_FSYNC07 | \
+ VTC_IXR_FSYNC08 | \
+ VTC_IXR_FSYNC09 | \
+ VTC_IXR_FSYNC10 | \
+ VTC_IXR_FSYNC11 | \
+ VTC_IXR_FSYNC12 | \
+ VTC_IXR_FSYNC13 | \
+ VTC_IXR_FSYNC14 | \
+ VTC_IXR_FSYNC15)
+
+#define VTC_IXR_G_AV (1 << 13) /* generator actv video intr */
+#define VTC_IXR_G_VBLANK (1 << 12) /* generator vblank interrupt */
+#define VTC_IXR_G_ALL_MASK (VTC_IXR_G_AV | \
+ VTC_IXR_G_VBLANK) /* all generator intr */
+
+#define VTC_IXR_D_AV (1 << 11) /* detector active video intr */
+#define VTC_IXR_D_VBLANK (1 << 10) /* detector vblank interrupt */
+#define VTC_IXR_D_ALL_MASK (VTC_IXR_D_AV | \
+ VTC_IXR_D_VBLANK) /* all detector intr */
+
+#define VTC_IXR_LOL (1 << 9) /* lock loss */
+#define VTC_IXR_LO (1 << 8) /* lock */
+#define VTC_IXR_LOCKALL_MASK (VTC_IXR_LOL | \
+ VTC_IXR_LO) /* all signal lock intr */
+
+#define VTC_IXR_ACL (1 << 21) /* active chroma signal lock */
+#define VTC_IXR_AVL (1 << 20) /* active video signal lock */
+#define VTC_IXR_HSL (1 << 19) /* horizontal sync signal lock */
+#define VTC_IXR_VSL (1 << 18) /* vertical sync signal lock */
+#define VTC_IXR_HBL (1 << 17) /* horizontal blank signal lock */
+#define VTC_IXR_VBL (1 << 16) /* vertical blank signal lock */
+
+#define VTC_GENC_INTERL BIT(6) /* Interlaced bit in VTC_GENC */
+/* mask for all interrupts */
+#define VTC_IXR_ALLINTR_MASK (VTC_IXR_FSYNCALL_MASK | \
+ VTC_IXR_G_ALL_MASK | \
+ VTC_IXR_D_ALL_MASK | \
+ VTC_IXR_LOCKALL_MASK)
+/**
+ * struct xilinx_vtc - Xilinx VTC object
+ *
+ * @base: base addr
+ * @irq: irq
+ * @vblank_fn: vblank handler func
+ * @vblank_data: vblank handler private data
+ */
+struct xilinx_vtc {
+ void __iomem *base;
+ int irq;
+ void (*vblank_fn)(void *);
+ void *vblank_data;
+};
+
+/**
+ * struct xilinx_vtc_polarity - vtc polarity config
+ *
+ * @active_chroma: active chroma polarity
+ * @active_video: active video polarity
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ */
+struct xilinx_vtc_polarity {
+ u8 active_chroma;
+ u8 active_video;
+ u8 field_id;
+ u8 vblank;
+ u8 vsync;
+ u8 hblank;
+ u8 hsync;
+};
+
+/**
+ * struct xilinx_vtc_hori_offset - vtc horizontal offset config
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ */
+struct xilinx_vtc_hori_offset {
+ u16 v0blank_hori_start;
+ u16 v0blank_hori_end;
+ u16 v0sync_hori_start;
+ u16 v0sync_hori_end;
+ u16 v1blank_hori_start;
+ u16 v1blank_hori_end;
+ u16 v1sync_hori_start;
+ u16 v1sync_hori_end;
+};
+
+/**
+ * struct xilinx_vtc_src_config - vtc source config
+ *
+ * @field_id_pol: field id polarity source
+ * @active_chroma_pol: active chroma polarity source
+ * @active_video_pol: active video polarity source
+ * @hsync_pol: hsync polarity source
+ * @vsync_pol: vsync polarity source
+ * @hblank_pol: hblank polarity source
+ * @vblank_pol: vblank polarity source
+ * @vchroma: vchroma polarity start source
+ * @vactive: vactive size source
+ * @vbackporch: vbackporch start source
+ * @vsync: vsync start source
+ * @vfrontporch: vfrontporch start source
+ * @vtotal: vtotal size source
+ * @hactive: hactive start source
+ * @hbackporch: hbackporch start source
+ * @hsync: hsync start source
+ * @hfrontporch: hfrontporch start source
+ * @htotal: htotal size source
+ */
+struct xilinx_vtc_src_config {
+ u8 field_id_pol;
+ u8 active_chroma_pol;
+ u8 active_video_pol;
+ u8 hsync_pol;
+ u8 vsync_pol;
+ u8 hblank_pol;
+ u8 vblank_pol;
+
+ u8 vchroma;
+ u8 vactive;
+ u8 vbackporch;
+ u8 vsync;
+ u8 vfrontporch;
+ u8 vtotal;
+
+ u8 hactive;
+ u8 hbackporch;
+ u8 hsync;
+ u8 hfrontporch;
+ u8 htotal;
+};
+
+/* configure polarity of signals */
+static void xilinx_vtc_config_polarity(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_polarity *polarity)
+{
+ u32 reg = 0;
+
+ if (polarity->active_chroma)
+ reg |= VTC_CTL_ACP;
+ if (polarity->active_video)
+ reg |= VTC_CTL_AVP;
+ if (polarity->field_id)
+ reg |= VTC_CTL_FIP;
+ if (polarity->vblank)
+ reg |= VTC_CTL_VBP;
+ if (polarity->vsync)
+ reg |= VTC_CTL_VSP;
+ if (polarity->hblank)
+ reg |= VTC_CTL_HBP;
+ if (polarity->hsync)
+ reg |= VTC_CTL_HSP;
+
+ xilinx_drm_writel(vtc->base, VTC_GPOL, reg);
+}
+
+/* configure horizontal offset */
+static void
+xilinx_vtc_config_hori_offset(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_hori_offset *hori_offset)
+{
+ u32 reg;
+
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hori_offset->v0blank_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v0blank_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVBHOFF_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hori_offset->v0sync_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v0sync_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSHOFF_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ reg = hori_offset->v1blank_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v1blank_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVBHOFF_F1, reg);
+
+ /* Calculate and update Generator VSync Hori field 1 */
+ reg = hori_offset->v1sync_hori_start & VTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_offset->v1sync_hori_end << VTC_XVXHOX_HEND_SHIFT) &
+ VTC_XVXHOX_HEND_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSHOFF_F1, reg);
+}
+
+/* configure source */
+static void xilinx_vtc_config_src(struct xilinx_vtc *vtc,
+ struct xilinx_vtc_src_config *src_config)
+{
+ u32 reg;
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+
+ if (src_config->field_id_pol)
+ reg |= VTC_CTL_FIPSS;
+ if (src_config->active_chroma_pol)
+ reg |= VTC_CTL_ACPSS;
+ if (src_config->active_video_pol)
+ reg |= VTC_CTL_AVPSS;
+ if (src_config->hsync_pol)
+ reg |= VTC_CTL_HSPSS;
+ if (src_config->vsync_pol)
+ reg |= VTC_CTL_VSPSS;
+ if (src_config->hblank_pol)
+ reg |= VTC_CTL_HBPSS;
+ if (src_config->vblank_pol)
+ reg |= VTC_CTL_VBPSS;
+
+ if (src_config->vchroma)
+ reg |= VTC_CTL_VCSS;
+ if (src_config->vactive)
+ reg |= VTC_CTL_VASS;
+ if (src_config->vbackporch)
+ reg |= VTC_CTL_VBSS;
+ if (src_config->vsync)
+ reg |= VTC_CTL_VSSS;
+ if (src_config->vfrontporch)
+ reg |= VTC_CTL_VFSS;
+ if (src_config->vtotal)
+ reg |= VTC_CTL_VTSS;
+
+ if (src_config->hbackporch)
+ reg |= VTC_CTL_HBSS;
+ if (src_config->hsync)
+ reg |= VTC_CTL_HSSS;
+ if (src_config->hfrontporch)
+ reg |= VTC_CTL_HFSS;
+ if (src_config->htotal)
+ reg |= VTC_CTL_HTSS;
+
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg);
+}
+
+/* enable vtc */
+void xilinx_vtc_enable(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ /* enable a generator only for now */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_GE);
+}
+
+/* disable vtc */
+void xilinx_vtc_disable(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ /* disable a generator only for now */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg & ~VTC_CTL_GE);
+}
+
+/* configure vtc signals */
+void xilinx_vtc_config_sig(struct xilinx_vtc *vtc,
+ struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xilinx_vtc_hori_offset hori_offset;
+ struct xilinx_vtc_polarity polarity;
+ struct xilinx_vtc_src_config src;
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg & ~VTC_CTL_RU);
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
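+
+ /*
+ * Worked example: for a standard 1920x1080p60 mode (hfront 88,
+ * hsync 44, hback 148, vfront 4, vsync 5, vback 36) this yields
+ * htotal = 2200, vtotal = 1125, hsync_start = 2008,
+ * hbackporch_start = 2052, vsync_start = 1084 and
+ * vbackporch_start = 1089.
+ */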
+
+ reg = htotal & 0x1fff;
+ xilinx_drm_writel(vtc->base, VTC_GHSIZE, reg);
+
+ reg = vtotal & 0x1fff;
+ reg |= reg << VTC_GV1_BPSTART_SHIFT;
+ xilinx_drm_writel(vtc->base, VTC_GVSIZE, reg);
+
+ DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+ reg = hactive & 0x1fff;
+ reg |= (vactive & 0x1fff) << 16;
+ xilinx_drm_writel(vtc->base, VTC_GASIZE, reg);
+
+ DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+
+ reg = hsync_start & VTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << VTC_GH1_BPSTART_SHIFT) &
+ VTC_GH1_BPSTART_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GHSYNC, reg);
+
+ DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+
+ reg = vsync_start & VTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << VTC_GV1_BPSTART_SHIFT) &
+ VTC_GV1_BPSTART_MASK;
+ xilinx_drm_writel(vtc->base, VTC_GVSYNC_F0, reg);
+ DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+ hori_offset.v0blank_hori_start = hactive;
+ hori_offset.v0blank_hori_end = hactive;
+ hori_offset.v0sync_hori_start = hsync_start;
+ hori_offset.v0sync_hori_end = hsync_start;
+
+ hori_offset.v1blank_hori_start = hactive;
+ hori_offset.v1blank_hori_end = hactive;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ hori_offset.v1sync_hori_start = hsync_start - (htotal / 2);
+ hori_offset.v1sync_hori_end = hsync_start - (htotal / 2);
+ xilinx_drm_writel(vtc->base, VTC_GVSYNC_F1, reg);
+ reg = xilinx_drm_readl(vtc->base, VTC_GENC) | VTC_GENC_INTERL;
+ xilinx_drm_writel(vtc->base, VTC_GENC, reg);
+ } else {
+ hori_offset.v1sync_hori_start = hsync_start;
+ hori_offset.v1sync_hori_end = hsync_start;
+ reg = xilinx_drm_readl(vtc->base, VTC_GENC) & ~VTC_GENC_INTERL;
+ xilinx_drm_writel(vtc->base, VTC_GENC, reg);
+ }
+
+ xilinx_vtc_config_hori_offset(vtc, &hori_offset);
+ /* set up polarity */
+ memset(&polarity, 0x0, sizeof(polarity));
+ polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.active_video = 1;
+ polarity.active_chroma = 1;
+ polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+ xilinx_vtc_config_polarity(vtc, &polarity);
+
+ /* set up src config */
+ memset(&src, 0x0, sizeof(src));
+ src.vchroma = 1;
+ src.vactive = 1;
+ src.vbackporch = 1;
+ src.vsync = 1;
+ src.vfrontporch = 1;
+ src.vtotal = 1;
+ src.hactive = 1;
+ src.hbackporch = 1;
+ src.hsync = 1;
+ src.hfrontporch = 1;
+ src.htotal = 1;
+ xilinx_vtc_config_src(vtc, &src);
+
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_RU);
+}
+
+/* reset vtc */
+void xilinx_vtc_reset(struct xilinx_vtc *vtc)
+{
+ u32 reg;
+
+ xilinx_drm_writel(vtc->base, VTC_RESET, VTC_RESET_RESET);
+
+ /* enable register update */
+ reg = xilinx_drm_readl(vtc->base, VTC_CTL);
+ xilinx_drm_writel(vtc->base, VTC_CTL, reg | VTC_CTL_RU);
+}
+
+/* enable vblank interrupt */
+void xilinx_vtc_vblank_enable(struct xilinx_vtc *vtc)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, VTC_IXR_G_VBLANK |
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_vblank_enable);
+
+/* enable interrupt */
+static inline void xilinx_vtc_intr_enable(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, (intr & VTC_IXR_ALLINTR_MASK) |
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+
+/* disable interrupt */
+static inline void xilinx_vtc_intr_disable(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, ~(intr & VTC_IXR_ALLINTR_MASK) &
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+
+/* disable vblank interrupt */
+void xilinx_vtc_vblank_disable(struct xilinx_vtc *vtc)
+{
+ xilinx_drm_writel(vtc->base, VTC_IER, ~(VTC_IXR_G_VBLANK) &
+ xilinx_drm_readl(vtc->base, VTC_IER));
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_vblank_disable);
+
+/* get interrupt */
+u32 xilinx_vtc_intr_get(struct xilinx_vtc *vtc)
+{
+ return xilinx_drm_readl(vtc->base, VTC_IER) &
+ xilinx_drm_readl(vtc->base, VTC_ISR) & VTC_IXR_ALLINTR_MASK;
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_intr_get);
+
+/* clear interrupt */
+void xilinx_vtc_intr_clear(struct xilinx_vtc *vtc, u32 intr)
+{
+ xilinx_drm_writel(vtc->base, VTC_ISR, intr & VTC_IXR_ALLINTR_MASK);
+}
+EXPORT_SYMBOL_GPL(xilinx_vtc_intr_clear);
+
+/* interrupt handler */
+static irqreturn_t xilinx_vtc_intr_handler(int irq, void *data)
+{
+ struct xilinx_vtc *vtc = data;
+
+ u32 intr = xilinx_vtc_intr_get(vtc);
+
+ if (!intr)
+ return IRQ_NONE;
+
+ if ((intr & VTC_IXR_G_VBLANK) && (vtc->vblank_fn))
+ vtc->vblank_fn(vtc->vblank_data);
+
+ xilinx_vtc_intr_clear(vtc, intr);
+
+ return IRQ_HANDLED;
+}
+
+/* enable vblank interrupt */
+void xilinx_vtc_enable_vblank_intr(struct xilinx_vtc *vtc,
+ void (*vblank_fn)(void *),
+ void *vblank_priv)
+{
+ vtc->vblank_fn = vblank_fn;
+ vtc->vblank_data = vblank_priv;
+ xilinx_vtc_intr_enable(vtc, VTC_IXR_G_VBLANK);
+}
+
+/* disable vblank interrupt */
+void xilinx_vtc_disable_vblank_intr(struct xilinx_vtc *vtc)
+{
+ xilinx_vtc_intr_disable(vtc, VTC_IXR_G_VBLANK);
+ vtc->vblank_data = NULL;
+ vtc->vblank_fn = NULL;
+}
+
+static const struct of_device_id xilinx_vtc_of_match[] = {
+ { .compatible = "xlnx,v-tc-5.01.a" },
+ { /* end of table */ },
+};
+
+/* probe vtc */
+struct xilinx_vtc *xilinx_vtc_probe(struct device *dev,
+ struct device_node *node)
+{
+ struct xilinx_vtc *vtc;
+ const struct of_device_id *match;
+ struct resource res;
+ int ret;
+
+ match = of_match_node(xilinx_vtc_of_match, node);
+ if (!match) {
+ dev_err(dev, "failed to match the device node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+ if (!vtc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "failed to of_address_to_resource\n");
+ return ERR_PTR(ret);
+ }
+
+ vtc->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(vtc->base))
+ return ERR_CAST(vtc->base);
+
+ xilinx_vtc_intr_disable(vtc, VTC_IXR_ALLINTR_MASK);
+ vtc->irq = irq_of_parse_and_map(node, 0);
+ if (vtc->irq > 0) {
+ ret = devm_request_irq(dev, vtc->irq, xilinx_vtc_intr_handler,
+ IRQF_SHARED, "xilinx_vtc", vtc);
+ if (ret) {
+ dev_warn(dev, "failed to requet_irq() for vtc\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ xilinx_vtc_reset(vtc);
+
+ return vtc;
+}
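+
+/*
+ * For illustration, a VTC node matching the table above might look like the
+ * following (hypothetical fragment; the address and interrupt values are
+ * examples):
+ *
+ *	vtc@43c40000 {
+ *		compatible = "xlnx,v-tc-5.01.a";
+ *		reg = <0x43c40000 0x10000>;
+ *		interrupts = <0 54 4>;
+ *	};
+ */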
diff --git a/drivers/gpu/drm/xilinx/xilinx_vtc.h b/drivers/gpu/drm/xilinx/xilinx_vtc.h
new file mode 100644
index 000000000000..33b4eb43513d
--- /dev/null
+++ b/drivers/gpu/drm/xilinx/xilinx_vtc.h
@@ -0,0 +1,44 @@
+/*
+ * Video Timing Controller Header for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_VTC_H_
+#define _XILINX_VTC_H_
+
+struct xilinx_vtc;
+
+struct videomode;
+
+void xilinx_vtc_config_sig(struct xilinx_vtc *vtc,
+ struct videomode *vm);
+void xilinx_vtc_enable_vblank_intr(struct xilinx_vtc *vtc,
+ void (*fn)(void *), void *data);
+void xilinx_vtc_disable_vblank_intr(struct xilinx_vtc *vtc);
+void xilinx_vtc_reset(struct xilinx_vtc *vtc);
+void xilinx_vtc_enable(struct xilinx_vtc *vtc);
+void xilinx_vtc_disable(struct xilinx_vtc *vtc);
+
+struct device;
+struct device_node;
+
+struct xilinx_vtc *xilinx_vtc_probe(struct device *dev,
+ struct device_node *node);
+void xilinx_vtc_vblank_enable(struct xilinx_vtc *vtc);
+void xilinx_vtc_vblank_disable(struct xilinx_vtc *vtc);
+u32 xilinx_vtc_intr_get(struct xilinx_vtc *vtc);
+void xilinx_vtc_intr_clear(struct xilinx_vtc *vtc, u32 intr);
+
+#endif /* _XILINX_VTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
new file mode 100644
index 000000000000..c7b695e83f2f
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -0,0 +1,104 @@
+config DRM_XLNX
+ tristate "Xilinx DRM KMS Driver"
+ depends on DRM && OF
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx DRM KMS driver. Choose this option if you have
+ a Xilinx SoC with a hardened display pipeline, or a soft
+ display pipeline built from Xilinx IPs in the FPGA. This
+ module provides the kernel mode setting functionality
+ for Xilinx display drivers.
+
+config DRM_XLNX_BRIDGE
+ tristate "Xilinx DRM KMS bridge"
+ depends on DRM_XLNX
+ help
+ Xilinx DRM KMS bridge. This module provides interfaces
+ that enable inter-module communication. Choose this option
+ from the provider driver when the Xilinx bridge interface
+ is needed.
+
+config DRM_XLNX_BRIDGE_DEBUG_FS
+ bool "Xilinx DRM KMS bridge debugfs"
+ depends on DEBUG_FS && DRM_XLNX_BRIDGE
+ help
+ Enable the debugfs code for the Xilinx bridge. The debugfs code
+ enables debugging and testing related features. It exposes some
+ low-level controls to user space to help with test automation,
+ and can expose additional diagnostic or statistical
+ information.
+
+config DRM_ZYNQMP_DPSUB
+ tristate "ZynqMP DP Subsystem Driver"
+ depends on ARCH_ZYNQMP && OF && DRM_XLNX && COMMON_CLK
+ select XILINX_DPDMA
+ select PHY_XILINX_ZYNQMP
+ help
+ DRM KMS driver for the ZynqMP DP Subsystem controller. Choose
+ this option if you have a Xilinx ZynqMP SoC with the DisplayPort
+ subsystem. The driver provides the kernel mode setting
+ functionality for the ZynqMP DP subsystem.
+
+config DRM_XLNX_DSI
+ tristate "Xilinx DRM DSI Subsystem Driver"
+ depends on DRM_XLNX
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_SIMPLE
+ help
+ DRM driver for Xilinx MIPI-DSI.
+
+config DRM_XLNX_MIXER
+ tristate "Xilinx DRM Mixer Driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for the Xilinx Mixer.
+
+config DRM_XLNX_PL_DISP
+ tristate "Xilinx DRM PL display driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for the Xilinx PL display pipeline. It provides
+ DRM CRTC and plane objects to the display pipeline. Choose
+ this option if your display pipeline needs one CRTC and
+ one plane object with a single DMA connected.
+
+config DRM_XLNX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XLNX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
+
+config DRM_XLNX_BRIDGE_CSC
+ tristate "Xilinx DRM CSC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for the color space converter of the VPSS.
+ Choose this option if the color space converter is connected
+ to an encoder. The driver provides set/get resolution and
+ color format functionality through the bridge layer.
+
+config DRM_XLNX_BRIDGE_SCALER
+ tristate "Xilinx DRM Scaler Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for the scaler of the VPSS. Choose this
+ option if the scaler is connected to an encoder. The driver
+ provides upscaling, downscaling and pass-through (no scaling)
+ functionality through the bridge layer.
+
+config DRM_XLNX_BRIDGE_VTC
+ tristate "Xilinx DRM VTC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for the Xilinx Video Timing Controller. Choose
+ this option to make the VTC a part of the CRTC in the display
+ pipeline. Currently the support is added to the Xilinx Video
+ Mixer and Xilinx PL display CRTC drivers. This driver provides
+ the ability to generate timings through the bridge layer.
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
new file mode 100644
index 000000000000..1d80be7d3e70
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -0,0 +1,21 @@
+xlnx_drm-objs += xlnx_crtc.o xlnx_drv.o xlnx_fb.o xlnx_gem.o
+xlnx_drm-$(CONFIG_DRM_XLNX_BRIDGE) += xlnx_bridge.o
+obj-$(CONFIG_DRM_XLNX) += xlnx_drm.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_CSC) += xlnx_csc.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_SCALER) += xlnx_scaler.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_VTC) += xlnx_vtc.o
+
+obj-$(CONFIG_DRM_XLNX_DSI) += xlnx_dsi.o
+
+obj-$(CONFIG_DRM_XLNX_MIXER) += xlnx_mixer.o
+
+obj-$(CONFIG_DRM_XLNX_PL_DISP) += xlnx_pl_disp.o
+
+xlnx-sdi-objs += xlnx_sdi.o xlnx_sdi_timing.o
+obj-$(CONFIG_DRM_XLNX_SDI) += xlnx-sdi.o
+
+zynqmp-dpsub-objs += zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o
+obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.c b/drivers/gpu/drm/xlnx/xlnx_bridge.c
new file mode 100644
index 000000000000..6ee462ada676
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM bridge driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/list.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * Similar to a DRM bridge, but this can be used by any DRM driver; there
+ * is no restriction preventing non-DRM drivers from using it either. No
+ * complex topology is modeled, thus it's assumed that the Xilinx bridge
+ * device is directly attached to the client. The client should call the
+ * Xilinx bridge functions explicitly where needed, as opposed to DRM
+ * bridge functions, which are called implicitly by the DRM core.
+ * One Xlnx bridge can be owned by one driver at a time.
+ */
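+
+/*
+ * A minimal client sketch (names are illustrative), assuming the client's
+ * device node carries a phandle to the bridge:
+ *
+ *	struct xlnx_bridge *b = of_xlnx_bridge_get(bridge_np);
+ *
+ *	if (b) {
+ *		xlnx_bridge_set_input(b, width, height, bus_fmt);
+ *		xlnx_bridge_enable(b);
+ *		...
+ *		xlnx_bridge_disable(b);
+ *		of_xlnx_bridge_put(b);
+ *	}
+ */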
+
+/**
+ * struct xlnx_bridge_helper - Xilinx bridge helper
+ * @xlnx_bridges: list of Xilinx bridges
+ * @lock: lock to protect @xlnx_bridges
+ * @refcnt: reference count
+ * @error: flag if in error state
+ */
+struct xlnx_bridge_helper {
+ struct list_head xlnx_bridges;
+ struct mutex lock; /* lock for @xlnx_bridges */
+ unsigned int refcnt;
+ bool error;
+};
+
+static struct xlnx_bridge_helper helper;
+
+struct videomode;
+/*
+ * Client functions
+ */
+
+/**
+ * xlnx_bridge_enable - Enable the bridge
+ * @bridge: bridge to enable
+ *
+ * Enable bridge.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->enable)
+ return bridge->enable(bridge);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_enable);
+
+/**
+ * xlnx_bridge_disable - Disable the bridge
+ * @bridge: bridge to disable
+ *
+ * Disable bridge.
+ */
+void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return;
+
+ if (helper.error)
+ return;
+
+ if (bridge->disable)
+ bridge->disable(bridge);
+}
+EXPORT_SYMBOL(xlnx_bridge_disable);
+
+/**
+ * xlnx_bridge_set_input - Set the input of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (e.g. MEDIA_BUS_FMT_*)
+ *
+ * Set the bridge input with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_input)
+ return bridge->set_input(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_input);
+
+/**
+ * xlnx_bridge_get_input_fmts - Get the supported input formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported input bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_input_fmts)
+ return bridge->get_input_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_input_fmts);
+
+/**
+ * xlnx_bridge_set_output - Set the output of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (e.g. MEDIA_BUS_FMT_*)
+ *
+ * Set the bridge output with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_output)
+ return bridge->set_output(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_output);
+
+/**
+ * xlnx_bridge_get_output_fmts - Get the supported output formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported output bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_output_fmts)
+ return bridge->get_output_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_output_fmts);
+
+/**
+ * xlnx_bridge_set_timing - Set the video timing
+ * @bridge: bridge to set
+ * @vm: Videomode
+ *
+ * Set the video mode so that timing can be generated using this
+ * by the video timing controller.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_timing) {
+ bridge->set_timing(bridge, vm);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_timing);
+
+/**
+ * of_xlnx_bridge_get - Get the corresponding Xlnx bridge instance
+ * @bridge_np: The device node of the bridge device
+ *
+ * The function walks through the global list of Xlnx bridges, and returns
+ * the registered bridge, if any, that matches the device node. The returned
+ * bridge will not be accessible to others.
+ *
+ * Return: the matching Xlnx bridge instance, or NULL
+ */
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ struct xlnx_bridge *found = NULL;
+ struct xlnx_bridge *bridge;
+
+ if (helper.error)
+ return NULL;
+
+ mutex_lock(&helper.lock);
+ list_for_each_entry(bridge, &helper.xlnx_bridges, list) {
+ if (bridge->of_node == bridge_np && !bridge->owned) {
+ found = bridge;
+ bridge->owned = true;
+ break;
+ }
+ }
+ mutex_unlock(&helper.lock);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_get);
+
+/**
+ * of_xlnx_bridge_put - Put the Xlnx bridge instance
+ * @bridge: Xlnx bridge instance to release
+ *
+ * Return the @bridge. After this, the bridge will be available for
+ * other drivers to use.
+ */
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+ if (WARN_ON(helper.error))
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->owned);
+ bridge->owned = false;
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_put);
+
+#ifdef CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS
+
+#include <linux/debugfs.h>
+
+struct xlnx_bridge_debugfs_dir {
+ struct dentry *dir;
+ int ref_cnt;
+};
+
+static struct xlnx_bridge_debugfs_dir *dir;
+
+struct xlnx_bridge_debugfs_file {
+ struct dentry *file;
+ const char *status;
+};
+
+#define XLNX_BRIDGE_DEBUGFS_MAX_BYTES 16
+
+static ssize_t xlnx_bridge_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct xlnx_bridge *bridge = f->f_inode->i_private;
+ int ret;
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ /* nothing to report until a status string has been set */
+ if (!bridge->debugfs_file->status)
+ return 0;
+
+ size = min(size, strlen(bridge->debugfs_file->status));
+ ret = copy_to_user(buf, bridge->debugfs_file->status, size);
+ if (ret)
+ return ret;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t xlnx_bridge_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct xlnx_bridge *bridge = f->f_inode->i_private;
+ char *cmd;
+ ssize_t ret;
+
+ if (*pos != 0 || size == 0)
+ return -EINVAL;
+
+ /* copy the command into a NUL-terminated kernel buffer before parsing */
+ cmd = kzalloc(size + 1, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ ret = strncpy_from_user(cmd, buf, size);
+ if (ret < 0) {
+ pr_err("%s %d failed to copy the command\n",
+ __func__, __LINE__);
+ kfree(cmd);
+ return ret;
+ }
+
+ if (!strncmp(cmd, "enable", 6)) {
+ xlnx_bridge_enable(bridge);
+ } else if (!strncmp(cmd, "disable", 7)) {
+ xlnx_bridge_disable(bridge);
+ } else if (!strncmp(cmd, "set_input", 9)) {
+ char *tmp = cmd;
+ char *w, *h, *fmt_str;
+ u32 width, height, fmt;
+ int err = -EINVAL;
+
+ /* skip the "set_input" token, then parse width, height, format */
+ strsep(&tmp, " ");
+ w = strsep(&tmp, " ");
+ h = strsep(&tmp, " ");
+ fmt_str = strsep(&tmp, " ");
+ if (w && h && fmt_str) {
+ err = kstrtouint(w, 0, &width);
+ err |= kstrtouint(h, 0, &height);
+ err |= kstrtouint(fmt_str, 0, &fmt);
+ }
+
+ if (err) {
+ pr_err("%s %d invalid command: %s\n",
+ __func__, __LINE__, cmd);
+ kfree(cmd);
+ return -EINVAL;
+ }
+ xlnx_bridge_set_input(bridge, width, height, fmt);
+ }
+
+ kfree(cmd);
+ return size;
+}
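+
+/*
+ * Usage example from a shell, assuming debugfs is mounted at the default
+ * location and a bridge node named "v_tc" (both illustrative):
+ *
+ *	echo "set_input 1920 1080 0x100a" > \
+ *		/sys/kernel/debug/xlnx-bridge/xlnx_bridge-v_tc
+ */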
+
+static const struct file_operations xlnx_bridge_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = xlnx_bridge_debugfs_read,
+ .write = xlnx_bridge_debugfs_write,
+};
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ struct xlnx_bridge_debugfs_file *file;
+ char file_name[32];
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
+
+ snprintf(file_name, sizeof(file_name), "xlnx_bridge-%s",
+ bridge->of_node->name);
+ file->file = debugfs_create_file(file_name, 0644, dir->dir, bridge,
+ &xlnx_bridge_debugfs_fops);
+ bridge->debugfs_file = file;
+
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+ debugfs_remove(bridge->debugfs_file->file);
+ kfree(bridge->debugfs_file);
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ if (dir) {
+ dir->ref_cnt++;
+ return 0;
+ }
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ dir->dir = debugfs_create_dir("xlnx-bridge", NULL);
+ if (!dir->dir) {
+ kfree(dir);
+ dir = NULL;
+ return -ENODEV;
+ }
+ dir->ref_cnt++;
+
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+ if (!dir || --dir->ref_cnt)
+ return;
+
+ debugfs_remove_recursive(dir->dir);
+ kfree(dir);
+ dir = NULL;
+}
+
+#else
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+}
+
+#endif
+
+/*
+ * Provider functions
+ */
+
+/**
+ * xlnx_bridge_register - Register the bridge instance
+ * @bridge: Xlnx bridge instance to register
+ *
+ * Register @bridge to be available for clients.
+ *
+ * Return: 0 on success. -EPROBE_DEFER if helper is not initialized, or
+ * -EFAULT if in error state.
+ */
+int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ if (!helper.refcnt)
+ return -EPROBE_DEFER;
+
+ if (helper.error)
+ return -EFAULT;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->of_node);
+ bridge->owned = false;
+ xlnx_bridge_debugfs_register(bridge);
+ list_add_tail(&bridge->list, &helper.xlnx_bridges);
+ mutex_unlock(&helper.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_register);
+
+/**
+ * xlnx_bridge_unregister - Unregister the bridge instance
+ * @bridge: Xlnx bridge instance to unregister
+ *
+ * Unregister @bridge. The bridge shouldn't be owned by any client
+ * at this point.
+ */
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+ if (helper.error)
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(bridge->owned);
+ xlnx_bridge_debugfs_unregister(bridge);
+ list_del(&bridge->list);
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_unregister);
+
+/*
+ * Internal functions: used by Xlnx DRM
+ */
+
+/**
+ * xlnx_bridge_helper_init - Initialize the bridge helper
+ *
+ * Initialize the bridge helper or increment the reference count
+ * if already initialized.
+ *
+ * Return: 0 on success, or -EFAULT if in error state.
+ */
+int xlnx_bridge_helper_init(void)
+{
+ if (helper.refcnt++ > 0) {
+ if (helper.error)
+ return -EFAULT;
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&helper.xlnx_bridges);
+ mutex_init(&helper.lock);
+ helper.error = false;
+
+ if (xlnx_bridge_debugfs_init())
+ pr_err("failed to init xlnx bridge debugfs\n");
+
+ return 0;
+}
+
+/**
+ * xlnx_bridge_helper_fini - Release the bridge helper
+ *
+ * Clean up or decrement the reference of the bridge helper.
+ */
+void xlnx_bridge_helper_fini(void)
+{
+ if (--helper.refcnt > 0)
+ return;
+
+ xlnx_bridge_debugfs_fini();
+
+ if (WARN_ON(!list_empty(&helper.xlnx_bridges))) {
+ helper.error = true;
+ pr_err("any further xlnx bridge call will fail\n");
+ }
+
+ mutex_destroy(&helper.lock);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.h b/drivers/gpu/drm/xlnx/xlnx_bridge.h
new file mode 100644
index 000000000000..64330169bd22
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.h
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM bridge header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_BRIDGE_H_
+#define _XLNX_BRIDGE_H_
+
+struct videomode;
+
+struct xlnx_bridge_debugfs_file;
+
+/**
+ * struct xlnx_bridge - Xilinx bridge device
+ * @list: list node for Xilinx bridge device list
+ * @of_node: OF node for the bridge
+ * @owned: flag if the bridge is owned
+ * @enable: callback to enable the bridge
+ * @disable: callback to disable the bridge
+ * @set_input: callback to set the input
+ * @get_input_fmts: callback to get supported input formats.
+ * @set_output: callback to set the output
+ * @get_output_fmts: callback to get supported output formats.
+ * @set_timing: callback to set timing in connected video timing controller.
+ * @debugfs_file: for debugfs support
+ */
+struct xlnx_bridge {
+ struct list_head list;
+ struct device_node *of_node;
+ bool owned;
+ int (*enable)(struct xlnx_bridge *bridge);
+ void (*disable)(struct xlnx_bridge *bridge);
+ int (*set_input)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_input_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_output)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_output_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_timing)(struct xlnx_bridge *bridge, struct videomode *vm);
+ struct xlnx_bridge_debugfs_file *debugfs_file;
+};
+
+#if IS_ENABLED(CONFIG_DRM_XLNX_BRIDGE)
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_bridge_helper;
+
+int xlnx_bridge_helper_init(void);
+void xlnx_bridge_helper_fini(void);
+
+/*
+ * Helper functions: used by client driver
+ */
+
+int xlnx_bridge_enable(struct xlnx_bridge *bridge);
+void xlnx_bridge_disable(struct xlnx_bridge *bridge);
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm);
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np);
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge);
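+
+/*
+ * A typical client sequence (a sketch; "encoder" and its fields are
+ * hypothetical, and error handling is omitted):
+ *
+ *	encoder->bridge = of_xlnx_bridge_get(bridge_np);
+ *	if (encoder->bridge) {
+ *		xlnx_bridge_set_input(encoder->bridge, width, height, fmt);
+ *		xlnx_bridge_enable(encoder->bridge);
+ *	}
+ */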
+
+/*
+ * Bridge registration: used by bridge driver
+ */
+
+int xlnx_bridge_register(struct xlnx_bridge *bridge);
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge);
+
+#else /* CONFIG_DRM_XLNX_BRIDGE */
+
+struct xlnx_bridge_helper;
+
+static inline int xlnx_bridge_helper_init(void)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_helper_fini(void)
+{
+}
+
+static inline int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_timing(struct xlnx_bridge *bridge,
+ struct videomode *vm)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline struct xlnx_bridge *
+of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ return NULL;
+}
+
+static inline void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+#endif /* CONFIG_DRM_XLNX_BRIDGE */
+
+#endif /* _XLNX_BRIDGE_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.c b/drivers/gpu/drm/xlnx/xlnx_crtc.c
new file mode 100644
index 000000000000..d5805c923675
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM crtc driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/list.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The Xilinx CRTC layer provides a custom interface to CRTC drivers.
+ * The interface is used by the Xilinx DRM driver where it needs CRTC
+ * functionality. CRTC drivers should attach the desired callbacks
+ * to struct xlnx_crtc and register the xlnx_crtc with the corresponding
+ * drm_device. It's highly recommended that CRTC drivers register all
+ * callbacks even though many of them are optional.
+ * The CRTC helper simply walks through the registered CRTC devices
+ * and calls the callbacks. See the registration sketch below.
+ */
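+
+/*
+ * Registration sketch (hypothetical driver "foo"; only get_format is shown,
+ * the remaining callbacks follow the same pattern):
+ *
+ *	static uint32_t foo_get_format(struct xlnx_crtc *crtc)
+ *	{
+ *		return DRM_FORMAT_XRGB8888;
+ *	}
+ *
+ *	foo->xlnx_crtc.get_format = &foo_get_format;
+ *	xlnx_crtc_register(drm, &foo->xlnx_crtc);
+ */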
+
+/**
+ * struct xlnx_crtc_helper - Xilinx CRTC helper
+ * @xlnx_crtcs: list of Xilinx CRTC devices
+ * @lock: lock to protect @xlnx_crtcs
+ * @drm: back pointer to DRM core
+ */
+struct xlnx_crtc_helper {
+ struct list_head xlnx_crtcs;
+ struct mutex lock; /* lock for @xlnx_crtcs */
+ struct drm_device *drm;
+};
+
+#define XLNX_CRTC_MAX_HEIGHT_WIDTH INT_MAX
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ unsigned int align = 1, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_align) {
+ tmp = crtc->get_align(crtc);
+ align = ALIGN(align, tmp);
+ }
+ }
+
+ return align;
+}
+
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u64 mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8), tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_dma_mask) {
+ tmp = crtc->get_dma_mask(crtc);
+ mask = min(mask, tmp);
+ }
+ }
+
+ return mask;
+}
+
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_width) {
+ tmp = crtc->get_max_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_height) {
+ tmp = crtc->get_max_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 format = 0, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_format) {
+ tmp = crtc->get_format(crtc);
+ if (format && format != tmp)
+ return 0;
+ format = tmp;
+ }
+ }
+
+ return format;
+}
+
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_width) {
+ tmp = crtc->get_cursor_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_height) {
+ tmp = crtc->get_cursor_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm)
+{
+ struct xlnx_crtc_helper *helper;
+
+ helper = devm_kzalloc(drm->dev, sizeof(*helper), GFP_KERNEL);
+ if (!helper)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&helper->xlnx_crtcs);
+ mutex_init(&helper->lock);
+ helper->drm = drm;
+
+ return helper;
+}
+
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper)
+{
+ if (WARN_ON(helper->drm != drm))
+ return;
+
+ if (WARN_ON(!list_empty(&helper->xlnx_crtcs)))
+ return;
+
+ mutex_destroy(&helper->lock);
+ devm_kfree(drm->dev, helper);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_add_tail(&crtc->list, &helper->xlnx_crtcs);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_register);
+
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_del(&crtc->list);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_unregister);
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.h b/drivers/gpu/drm/xlnx/xlnx_crtc.h
new file mode 100644
index 000000000000..9ab57594aba8
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.h
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM crtc header
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_CRTC_H_
+#define _XLNX_CRTC_H_
+
+/**
+ * struct xlnx_crtc - Xilinx CRTC device
+ * @crtc: DRM CRTC device
+ * @list: list node for Xilinx CRTC device list
+ * @get_align: Get the alignment requirement of CRTC device
+ * @get_dma_mask: Get the dma mask of CRTC device
+ * @get_max_width: Get the maximum supported width
+ * @get_max_height: Get the maximum supported height
+ * @get_format: Get the current format of CRTC device
+ * @get_cursor_width: Get the cursor width
+ * @get_cursor_height: Get the cursor height
+ */
+struct xlnx_crtc {
+ struct drm_crtc crtc;
+ struct list_head list;
+ unsigned int (*get_align)(struct xlnx_crtc *crtc);
+ u64 (*get_dma_mask)(struct xlnx_crtc *crtc);
+ int (*get_max_width)(struct xlnx_crtc *crtc);
+ int (*get_max_height)(struct xlnx_crtc *crtc);
+ uint32_t (*get_format)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_width)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_height)(struct xlnx_crtc *crtc);
+};
+
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_crtc_helper;
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper);
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper);
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper);
+
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm);
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper);
+
+/*
+ * CRTC registration: used by other sub-driver modules
+ */
+
+static inline struct xlnx_crtc *to_xlnx_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct xlnx_crtc, crtc);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc);
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc);
+
+#endif /* _XLNX_CRTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_csc.c b/drivers/gpu/drm/xlnx/xlnx_csc.c
new file mode 100644
index 000000000000..1d4341dce570
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_csc.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS CSC DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from V4L2.
+ * TODO:
+ * Need to implement in a modular approach to share driver code between
+ * V4L2 and DRM frameworks.
+ * Should be integrated with the plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+/* Register offset */
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+#define XV_CSC_SCALE_FACTOR (4096)
+#define XV_CSC_DIVISOR (10000)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+#define XCSC_STREAM_OFF (0)
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+
+#define XCSC_MIN_WIDTH (64)
+#define XCSC_MAX_WIDTH (8192)
+#define XCSC_MIN_HEIGHT (64)
+#define XCSC_MAX_HEIGHT (4320)
+
+static const u32 xilinx_csc_video_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/* vpss_csc_color_fmt - Color format type */
+enum vpss_csc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/**
+ * struct xilinx_csc - Core configuration of csc device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @cft_in: input color format
+ * @cft_out: output color format
+ * @color_depth: color depth
+ * @k_hw: array of hardware values
+ * @clip_max: clipping maximum value
+ * @width: width of the video
+ * @height: height of video
+ * @max_width: maximum number of pixels in a line
+ * @max_height: maximum number of lines per frame
+ * @rst_gpio: Handle to GPIO specifier to assert/de-assert the reset line
+ * @aclk: IP clock struct
+ */
+struct xilinx_csc {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ enum vpss_csc_color_fmt cft_in;
+ enum vpss_csc_color_fmt cft_out;
+ u32 color_depth;
+ s32 k_hw[3][4];
+ s32 clip_max;
+ u32 width;
+ u32 height;
+ u32 max_width;
+ u32 max_height;
+ struct gpio_desc *rst_gpio;
+ struct clk *aclk;
+};
+
+static inline void xilinx_csc_write(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_csc_read(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the member.
+ *
+ * Return: parent structure pointer
+ */
+static inline struct xilinx_csc *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xilinx_csc, bridge);
+}
+
+static void xilinx_csc_write_rgb_3x3(struct xilinx_csc *csc)
+{
+ xilinx_csc_write(csc->base, XV_CSC_K11, csc->k_hw[0][0]);
+ xilinx_csc_write(csc->base, XV_CSC_K12, csc->k_hw[0][1]);
+ xilinx_csc_write(csc->base, XV_CSC_K13, csc->k_hw[0][2]);
+ xilinx_csc_write(csc->base, XV_CSC_K21, csc->k_hw[1][0]);
+ xilinx_csc_write(csc->base, XV_CSC_K22, csc->k_hw[1][1]);
+ xilinx_csc_write(csc->base, XV_CSC_K23, csc->k_hw[1][2]);
+ xilinx_csc_write(csc->base, XV_CSC_K31, csc->k_hw[2][0]);
+ xilinx_csc_write(csc->base, XV_CSC_K32, csc->k_hw[2][1]);
+ xilinx_csc_write(csc->base, XV_CSC_K33, csc->k_hw[2][2]);
+}
+
+static void xilinx_csc_write_rgb_offset(struct xilinx_csc *csc)
+{
+ xilinx_csc_write(csc->base, XV_CSC_ROFFSET, csc->k_hw[0][3]);
+ xilinx_csc_write(csc->base, XV_CSC_GOFFSET, csc->k_hw[1][3]);
+ xilinx_csc_write(csc->base, XV_CSC_BOFFSET, csc->k_hw[2][3]);
+}
+
+static void xilinx_csc_write_coeff(struct xilinx_csc *csc)
+{
+ xilinx_csc_write_rgb_3x3(csc);
+ xilinx_csc_write_rgb_offset(csc);
+}
+
+static void xcsc_set_default_state(struct xilinx_csc *csc)
+{
+ csc->cft_in = XVIDC_CSF_YCRCB_422;
+ csc->cft_out = XVIDC_CSF_YCRCB_422;
+
+	/* This represents an identity matrix multiplied by 2^12 */
+ csc->k_hw[0][0] = XV_CSC_SCALE_FACTOR;
+ csc->k_hw[0][1] = 0;
+ csc->k_hw[0][2] = 0;
+ csc->k_hw[1][0] = 0;
+ csc->k_hw[1][1] = XV_CSC_SCALE_FACTOR;
+ csc->k_hw[1][2] = 0;
+ csc->k_hw[2][0] = 0;
+ csc->k_hw[2][1] = 0;
+ csc->k_hw[2][2] = XV_CSC_SCALE_FACTOR;
+ csc->k_hw[0][3] = 0;
+ csc->k_hw[1][3] = 0;
+ csc->k_hw[2][3] = 0;
+ csc->clip_max = ((1 << csc->color_depth) - 1);
+ xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+ xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+ xilinx_csc_write_coeff(csc);
+ xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+ xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+static void xcsc_ycrcb_to_rgb(struct xilinx_csc *csc, s32 *clip_max)
+{
+ u16 bpc_scale = (1 << (csc->color_depth - 8));
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+	 * derived from this matrix-style algorithm, and the
+ * 'magic' numbers here are derived from the algorithm.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations.
+ *
+ * Coefficients valid only for BT 709
+ */
+ csc->k_hw[0][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][1] = 0;
+ csc->k_hw[0][2] = 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][1] = -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][2] = -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][1] = 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][2] = 0;
+ csc->k_hw[0][3] = -248 * bpc_scale;
+ csc->k_hw[1][3] = 77 * bpc_scale;
+ csc->k_hw[2][3] = -289 * bpc_scale;
+ *clip_max = ((1 << csc->color_depth) - 1);
+}
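+
+/*
+ * Worked example of the fixed-point conversion above: the BT.709 luma
+ * coefficient 1.1644 is stored as 11644, so the register value is
+ * 11644 * 4096 / 10000 = 4769, i.e. 4769 / 4096 ~= 1.1644 in the 2^12
+ * scaled format used by the IP.
+ */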
+
+static void xcsc_rgb_to_ycrcb(struct xilinx_csc *csc, s32 *clip_max)
+{
+ u16 bpc_scale = (1 << (csc->color_depth - 8));
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+	 * how these numbers are derived. The VPSS CSC IP is
+	 * derived from this matrix-style algorithm, and the
+ * 'magic' numbers here are derived from the algorithm.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations.
+ *
+ * Coefficients valid only for BT 709
+ */
+ csc->k_hw[0][0] = 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][1] = 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][2] = 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][0] = -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][1] = -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[1][2] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][0] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][1] = -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[2][2] = -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+ csc->k_hw[0][3] = 16 * bpc_scale;
+ csc->k_hw[1][3] = 128 * bpc_scale;
+ csc->k_hw[2][3] = 128 * bpc_scale;
+ *clip_max = ((1 << csc->color_depth) - 1);
+}
+
+/**
+ * xcsc_set_coeff - Sets the coefficients
+ * @csc: Pointer to csc device structure
+ *
+ * This function sets the coefficients.
+ */
+static void xcsc_set_coeff(struct xilinx_csc *csc)
+{
+ xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+ xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+ xilinx_csc_write_coeff(csc);
+ xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+ xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+/**
+ * xilinx_csc_bridge_enable - Enables the csc core
+ * @bridge: bridge instance
+ *
+ * This function enables the csc core.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_enable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_disable - disables csc core
+ * @bridge: bridge instance
+ *
+ * This function disables the csc core
+ */
+static void xilinx_csc_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_OFF);
+ /* Reset the Global IP Reset through GPIO */
+ gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+}
+
+/**
+ * xilinx_csc_bridge_set_input - Sets the input parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the input parameters of csc
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_input(struct xlnx_bridge *bridge, u32 width,
+ u32 height, u32 bus_fmt)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	if (height > csc->max_height || height < XCSC_MIN_HEIGHT)
+		return -EINVAL;
+
+	if (width > csc->max_width || width < XCSC_MIN_WIDTH)
+		return -EINVAL;
+
+	xcsc_set_default_state(csc);
+	csc->height = height;
+	csc->width = width;
+
+ switch (bus_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ csc->cft_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ csc->cft_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ csc->cft_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ csc->cft_in = XVIDC_CSF_YCRCB_420;
+ break;
+ default:
+ dev_dbg(csc->dev, "unsupported input video format\n");
+ return -EINVAL;
+ }
+
+ xilinx_csc_write(csc->base, XV_CSC_WIDTH, width);
+ xilinx_csc_write(csc->base, XV_CSC_HEIGHT, height);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_input_fmts - input formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the input video format information for the csc.
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_csc_video_fmts;
+ *count = ARRAY_SIZE(xilinx_csc_video_fmts);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_set_output - Sets the output parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of csc
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_output(struct xlnx_bridge *bridge, u32 width,
+ u32 height, u32 bus_fmt)
+{
+ struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+ if (width != csc->width || height != csc->height)
+ return -EINVAL;
+
+ switch (bus_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ csc->cft_out = XVIDC_CSF_RGB;
+ dev_dbg(csc->dev, "Media Format Out : RGB");
+ if (csc->cft_in != MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_ycrcb_to_rgb(csc, &csc->clip_max);
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ csc->cft_out = XVIDC_CSF_YCRCB_444;
+ dev_dbg(csc->dev, "Media Format Out : YUV 444");
+ if (csc->cft_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ csc->cft_out = XVIDC_CSF_YCRCB_422;
+ dev_dbg(csc->dev, "Media Format Out : YUV 422");
+ if (csc->cft_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ csc->cft_out = XVIDC_CSF_YCRCB_420;
+ dev_dbg(csc->dev, "Media Format Out : YUV 420");
+ if (csc->cft_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+ break;
+ default:
+ dev_info(csc->dev, "unsupported output video format\n");
+ return -EINVAL;
+ }
+ xcsc_set_coeff(csc);
+
+ return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_output_fmts - output formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the output video format information for the csc.
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_csc_video_fmts;
+ *count = ARRAY_SIZE(xilinx_csc_video_fmts);
+ return 0;
+}
+
+static int xcsc_parse_of(struct xilinx_csc *csc)
+{
+ int ret;
+ struct device_node *node = csc->dev->of_node;
+
+ csc->aclk = devm_clk_get(csc->dev, NULL);
+ if (IS_ERR(csc->aclk)) {
+ ret = PTR_ERR(csc->aclk);
+ dev_err(csc->dev, "failed to get aclk %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,video-width",
+ &csc->color_depth);
+ if (ret < 0) {
+ dev_info(csc->dev, "video width not present in DT\n");
+ return ret;
+ }
+ if (csc->color_depth != 8 && csc->color_depth != 10 &&
+ csc->color_depth != 12 && csc->color_depth != 16) {
+ dev_err(csc->dev, "Invalid video width in DT\n");
+ return -EINVAL;
+ }
+ /* Reset GPIO */
+ csc->rst_gpio = devm_gpiod_get(csc->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(csc->rst_gpio)) {
+ if (PTR_ERR(csc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(csc->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(csc->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height", &csc->max_height);
+ if (ret < 0) {
+ dev_err(csc->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (csc->max_height > XCSC_MAX_HEIGHT ||
+ csc->max_height < XCSC_MIN_HEIGHT) {
+ dev_err(csc->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width", &csc->max_width);
+ if (ret < 0) {
+ dev_err(csc->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (csc->max_width > XCSC_MAX_WIDTH ||
+ csc->max_width < XCSC_MIN_WIDTH) {
+ dev_err(csc->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xilinx_csc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_csc *csc;
+ int ret;
+
+ csc = devm_kzalloc(dev, sizeof(*csc), GFP_KERNEL);
+ if (!csc)
+ return -ENOMEM;
+
+ csc->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csc->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(csc->base))
+		return PTR_ERR(csc->base);
+
+ platform_set_drvdata(pdev, csc);
+ ret = xcsc_parse_of(csc);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(csc->aclk);
+ if (ret) {
+ dev_err(csc->dev, "failed to enable clock %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+ csc->bridge.enable = &xilinx_csc_bridge_enable;
+ csc->bridge.disable = &xilinx_csc_bridge_disable;
+ csc->bridge.set_input = &xilinx_csc_bridge_set_input;
+ csc->bridge.get_input_fmts = &xilinx_csc_bridge_get_input_fmts;
+ csc->bridge.set_output = &xilinx_csc_bridge_set_output;
+ csc->bridge.get_output_fmts = &xilinx_csc_bridge_get_output_fmts;
+ csc->bridge.of_node = dev->of_node;
+
+ ret = xlnx_bridge_register(&csc->bridge);
+ if (ret) {
+ dev_info(csc->dev, "Bridge registration failed\n");
+ goto err_clk;
+ }
+
+ dev_info(csc->dev, "Xilinx VPSS CSC DRM experimental driver probed\n");
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(csc->aclk);
+ return ret;
+}
+
+static int xilinx_csc_remove(struct platform_device *pdev)
+{
+ struct xilinx_csc *csc = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&csc->bridge);
+ clk_disable_unprepare(csc->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_csc_of_match[] = {
+ { .compatible = "xlnx,vpss-csc"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_csc_of_match);
+
+static struct platform_driver csc_bridge_driver = {
+ .probe = xilinx_csc_probe,
+ .remove = xilinx_csc_remove,
+ .driver = {
+ .name = "xlnx,csc-bridge",
+ .of_match_table = xilinx_csc_of_match,
+ },
+};
+
+module_platform_driver(csc_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA CSC Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.c b/drivers/gpu/drm/xlnx/xlnx_drv.c
new file mode 100644
index 000000000000..445325407bb5
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Driver
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reservation.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+#include "xlnx_gem.h"
+
+#define DRIVER_NAME "xlnx"
+#define DRIVER_DESC "Xilinx DRM KMS Driver"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xlnx_fbdev_vres = 2;
+module_param_named(fbdev_vres, xlnx_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/**
+ * struct xlnx_drm - Xilinx DRM private data
+ * @drm: DRM core
+ * @crtc: Xilinx DRM CRTC helper
+ * @fb: DRM fb helper
+ * @master: logical master device for pipeline
+ * @suspend_state: atomic state for suspend / resume
+ * @is_master: A flag to indicate if this instance is fake master
+ */
+struct xlnx_drm {
+ struct drm_device *drm;
+ struct xlnx_crtc_helper *crtc;
+ struct drm_fb_helper *fb;
+ struct platform_device *master;
+ struct drm_atomic_state *suspend_state;
+ bool is_master;
+};
+
+/**
+ * xlnx_get_crtc_helper - Return the crtc helper instance
+ * @drm: DRM device
+ *
+ * Return: the crtc helper instance
+ */
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ return xlnx_drm->crtc;
+}
+
+/**
+ * xlnx_get_align - Return the align requirement through CRTC helper
+ * @drm: DRM device
+ *
+ * Return: the alignment requirement
+ */
+unsigned int xlnx_get_align(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ return xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+}
+
+/**
+ * xlnx_get_format - Return the current format of CRTC
+ * @drm: DRM device
+ *
+ * Return: the current CRTC format
+ */
+uint32_t xlnx_get_format(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ return xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+}
+
+static void xlnx_output_poll_changed(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ if (xlnx_drm->fb)
+ drm_fb_helper_hotplug_event(xlnx_drm->fb);
+}
+
+static const struct drm_mode_config_funcs xlnx_mode_config_funcs = {
+ .fb_create = xlnx_fb_create,
+ .output_poll_changed = xlnx_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void xlnx_mode_config_init(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+ struct xlnx_crtc_helper *crtc = xlnx_drm->crtc;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = xlnx_crtc_helper_get_max_width(crtc);
+ drm->mode_config.max_height = xlnx_crtc_helper_get_max_height(crtc);
+ drm->mode_config.cursor_width =
+ xlnx_crtc_helper_get_cursor_width(crtc);
+ drm->mode_config.cursor_height =
+ xlnx_crtc_helper_get_cursor_height(crtc);
+}
+
+static int xlnx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct xlnx_drm *xlnx_drm = dev->dev_private;
+
+ /* This is a hacky way to allow the root user to run as a master */
+ if (!(drm_is_primary_client(file) && !dev->master) &&
+ !file->is_master && capable(CAP_SYS_ADMIN)) {
+ file->is_master = 1;
+ xlnx_drm->is_master = true;
+ }
+
+ return 0;
+}
+
+static int xlnx_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file = filp->private_data;
+ struct drm_minor *minor = file->minor;
+ struct drm_device *drm = minor->dev;
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ if (xlnx_drm->is_master) {
+ xlnx_drm->is_master = false;
+ file->is_master = 0;
+ }
+
+ return drm_release(inode, filp);
+}
+
+static void xlnx_lastclose(struct drm_device *drm)
+{
+ struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+ if (xlnx_drm->fb)
+ drm_fb_helper_restore_fbdev_mode_unlocked(xlnx_drm->fb);
+}
+
+static const struct file_operations xlnx_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = xlnx_drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xlnx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_ATOMIC | DRIVER_PRIME,
+ .open = xlnx_drm_open,
+ .lastclose = xlnx_lastclose,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xlnx_gem_cma_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .fops = &xlnx_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int xlnx_bind(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm;
+ struct drm_device *drm;
+ const struct drm_format_info *info;
+ struct platform_device *master = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev->parent);
+ int ret;
+ u32 format;
+
+ drm = drm_dev_alloc(&xlnx_drm_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ xlnx_drm = devm_kzalloc(drm->dev, sizeof(*xlnx_drm), GFP_KERNEL);
+ if (!xlnx_drm) {
+ ret = -ENOMEM;
+ goto err_drm;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.funcs = &xlnx_mode_config_funcs;
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize vblank\n");
+ goto err_xlnx_drm;
+ }
+
+ drm->irq_enabled = 1;
+ drm->dev_private = xlnx_drm;
+ xlnx_drm->drm = drm;
+ xlnx_drm->master = master;
+ drm_kms_helper_poll_init(drm);
+ platform_set_drvdata(master, xlnx_drm);
+
+ xlnx_drm->crtc = xlnx_crtc_helper_init(drm);
+ if (IS_ERR(xlnx_drm->crtc)) {
+ ret = PTR_ERR(xlnx_drm->crtc);
+ goto err_xlnx_drm;
+ }
+
+ ret = component_bind_all(&master->dev, drm);
+ if (ret)
+ goto err_crtc;
+
+ xlnx_mode_config_init(drm);
+ drm_mode_config_reset(drm);
+ dma_set_mask(drm->dev, xlnx_crtc_helper_get_dma_mask(xlnx_drm->crtc));
+
+ format = xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+ info = drm_format_info(format);
+ if (info && info->depth && info->cpp[0]) {
+ unsigned int align;
+
+ align = xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+ xlnx_drm->fb = xlnx_fb_init(drm, info->cpp[0] * 8, 1, align,
+ xlnx_fbdev_vres);
+ if (IS_ERR(xlnx_drm->fb)) {
+ dev_err(&pdev->dev,
+ "failed to initialize drm fb\n");
+ xlnx_drm->fb = NULL;
+ }
+ } else {
+ /* fbdev emulation is optional */
+ dev_info(&pdev->dev, "fbdev is not initialized\n");
+ }
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_fb;
+
+ return 0;
+
+err_fb:
+ if (xlnx_drm->fb)
+ xlnx_fb_fini(xlnx_drm->fb);
+ component_unbind_all(drm->dev, drm);
+err_crtc:
+ xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+err_xlnx_drm:
+ drm_mode_config_cleanup(drm);
+err_drm:
+ drm_dev_put(drm);
+ return ret;
+}
+
+static void xlnx_unbind(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+ struct drm_device *drm = xlnx_drm->drm;
+
+ drm_dev_unregister(drm);
+ if (xlnx_drm->fb)
+ xlnx_fb_fini(xlnx_drm->fb);
+ component_unbind_all(&xlnx_drm->master->dev, drm);
+ xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+ drm_dev_put(drm);
+}
+
+static const struct component_master_ops xlnx_master_ops = {
+ .bind = xlnx_bind,
+ .unbind = xlnx_unbind,
+};
+
+static int xlnx_of_component_probe(struct device *master_dev,
+ int (*compare_of)(struct device *, void *),
+ const struct component_master_ops *m_ops)
+{
+ struct device *dev = master_dev->parent;
+ struct device_node *ep, *port, *remote, *parent;
+ struct component_match *match = NULL;
+ int i;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ component_match_add(master_dev, &match, compare_of, dev->of_node);
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(dev->of_node, "ports", i);
+ if (!port)
+ break;
+
+ parent = port->parent;
+ if (!of_node_cmp(parent->name, "ports"))
+ parent = parent->parent;
+ parent = of_node_get(parent);
+
+ if (!of_device_is_available(parent)) {
+ of_node_put(parent);
+ of_node_put(port);
+ continue;
+ }
+
+ component_match_add(master_dev, &match, compare_of, parent);
+ of_node_put(parent);
+ of_node_put(port);
+ }
+
+ parent = dev->of_node;
+ for (i = 0; ; i++) {
+ parent = of_node_get(parent);
+ if (!of_device_is_available(parent)) {
+ of_node_put(parent);
+ continue;
+ }
+
+ for_each_endpoint_of_node(parent, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote) ||
+ remote == dev->of_node) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent dev of %s unavailable\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+ component_match_add(master_dev, &match, compare_of,
+ remote);
+ of_node_put(remote);
+ }
+ of_node_put(parent);
+
+ port = of_parse_phandle(dev->of_node, "ports", i);
+ if (!port)
+ break;
+
+ parent = port->parent;
+ if (!of_node_cmp(parent->name, "ports"))
+ parent = parent->parent;
+ of_node_put(port);
+ }
+
+ return component_master_add_with_match(master_dev, m_ops, match);
+}
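+
+/*
+ * The walk above is driven by a "ports" phandle list on the pipeline node.
+ * A hypothetical device tree sketch (node and label names are illustrative
+ * only):
+ *
+ *	display_pipeline {
+ *		compatible = "...";
+ *		ports = <&crtc_port>, <&encoder_port>;
+ *	};
+ *
+ * Each phandle points at a port (or its "ports" container) of a slave
+ * device; that device and every available remote-endpoint parent reachable
+ * from it are added as component match entries.
+ */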
+
+static int xlnx_compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int xlnx_platform_probe(struct platform_device *pdev)
+{
+ return xlnx_of_component_probe(&pdev->dev, xlnx_compare_of,
+ &xlnx_master_ops);
+}
+
+static int xlnx_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &xlnx_master_ops);
+ return 0;
+}
+
+static void xlnx_platform_shutdown(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &xlnx_master_ops);
+}
+
+static int __maybe_unused xlnx_pm_suspend(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+ struct drm_device *drm = xlnx_drm->drm;
+
+ drm_kms_helper_poll_disable(drm);
+
+ xlnx_drm->suspend_state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(xlnx_drm->suspend_state)) {
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(xlnx_drm->suspend_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused xlnx_pm_resume(struct device *dev)
+{
+ struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+ struct drm_device *drm = xlnx_drm->drm;
+
+ drm_atomic_helper_resume(drm, xlnx_drm->suspend_state);
+ drm_kms_helper_poll_enable(drm);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xlnx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xlnx_pm_suspend, xlnx_pm_resume)
+};
+
+static struct platform_driver xlnx_driver = {
+ .probe = xlnx_platform_probe,
+ .remove = xlnx_platform_remove,
+ .shutdown = xlnx_platform_shutdown,
+ .driver = {
+ .name = "xlnx-drm",
+ .pm = &xlnx_pm_ops,
+ },
+};
+
+/* bitmap for master id */
+static u32 xlnx_master_ids = GENMASK(31, 0);
+
+/**
+ * xlnx_drm_pipeline_init - Initialize the drm pipeline for the device
+ * @pdev: The platform device to initialize the drm pipeline device
+ *
+ * This function initializes the drm pipeline device, struct drm_device,
+ * on @pdev by creating a logical master platform device. The logical platform
+ * device acts as a master device to bind slave devices and represents
+ * the entire pipeline.
+ * The logical master uses the port bindings of the calling device to
+ * figure out the pipeline topology.
+ *
+ * Return: the logical master platform device if the drm device is initialized
+ * on @pdev. Error code otherwise.
+ */
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *pdev)
+{
+ struct platform_device *master;
+ int id, ret;
+
+ id = ffs(xlnx_master_ids);
+ if (!id)
+ return ERR_PTR(-ENOSPC);
+
+ master = platform_device_alloc("xlnx-drm", id - 1);
+ if (!master)
+ return ERR_PTR(-ENOMEM);
+
+ master->dev.parent = &pdev->dev;
+ ret = platform_device_add(master);
+ if (ret)
+ goto err_out;
+
+ WARN_ON(master->id != id - 1);
+ xlnx_master_ids &= ~BIT(master->id);
+ return master;
+
+err_out:
+	platform_device_put(master);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_init);
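+
+/*
+ * Usage sketch from a pipeline device's probe (hypothetical; "priv" and its
+ * field are illustrative, and teardown calls xlnx_drm_pipeline_exit()):
+ *
+ *	priv->master = xlnx_drm_pipeline_init(pdev);
+ *	if (IS_ERR(priv->master))
+ *		return PTR_ERR(priv->master);
+ */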
+
+/**
+ * xlnx_drm_pipeline_exit - Release the drm pipeline for the device
+ * @master: The master pipeline device to release
+ *
+ * Release the logical pipeline device returned by xlnx_drm_pipeline_init().
+ */
+void xlnx_drm_pipeline_exit(struct platform_device *master)
+{
+ xlnx_master_ids |= BIT(master->id);
+ platform_device_unregister(master);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_exit);
+
+static int __init xlnx_drm_drv_init(void)
+{
+	int ret;
+
+	ret = xlnx_bridge_helper_init();
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&xlnx_driver);
+	if (ret)
+		xlnx_bridge_helper_fini();
+
+	return ret;
+}
+
+static void __exit xlnx_drm_drv_exit(void)
+{
+ platform_driver_unregister(&xlnx_driver);
+ xlnx_bridge_helper_fini();
+}
+
+module_init(xlnx_drm_drv_init);
+module_exit(xlnx_drm_drv_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.h b/drivers/gpu/drm/xlnx/xlnx_drv.h
new file mode 100644
index 000000000000..0f6595f1bd85
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS header
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_DRV_H_
+#define _XLNX_DRV_H_
+
+struct drm_device;
+struct xlnx_crtc_helper;
+
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *parent);
+void xlnx_drm_pipeline_exit(struct platform_device *pipeline);
+
+uint32_t xlnx_get_format(struct drm_device *drm);
+unsigned int xlnx_get_align(struct drm_device *drm);
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm);
+struct xlnx_bridge_helper *xlnx_get_bridge_helper(struct drm_device *drm);
+
+#endif /* _XLNX_DRV_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_dsi.c b/drivers/gpu/drm/xlnx/xlnx_dsi.c
new file mode 100644
index 000000000000..eae5b92b46c2
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_dsi.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "xlnx_bridge.h"
+
+/* DSI Tx IP registers */
+#define XDSI_CCR 0x00
+#define XDSI_CCR_COREENB BIT(0)
+#define XDSI_CCR_CRREADY BIT(2)
+#define XDSI_PCR 0x04
+#define XDSI_PCR_VIDEOMODE(x) (((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK (0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT 3
+#define XDSI_PCR_BLLPTYPE(x) ((x) << 5)
+#define XDSI_PCR_BLLPMODE(x) ((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x) ((x) << 13)
+#define XDSI_GIER 0x20
+#define XDSI_ISR 0x24
+#define XDSI_IER 0x28
+#define XDSI_CMD 0x30
+#define XDSI_CMD_QUEUE_PACKET(x) ((x) & GENMASK(23, 0))
+#define XDSI_TIME1 0x50
+#define XDSI_TIME1_BLLP_BURST(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME1_HSA(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME2 0x54
+#define XDSI_TIME2_VACT(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME2_HACT(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_HACT_MULTIPLIER GENMASK(1, 0)
+#define XDSI_TIME3 0x58
+#define XDSI_TIME3_HFP(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME3_HBP(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME4 0x5c
+#define XDSI_TIME4_VFP(x) ((x) & GENMASK(7, 0))
+#define XDSI_TIME4_VBP(x) (((x) & GENMASK(7, 0)) << 8)
+#define XDSI_TIME4_VSA(x) (((x) & GENMASK(7, 0)) << 16)
+#define XDSI_LTIME 0x60
+#define XDSI_BLLP_TIME 0x64
+/*
+ * XDSI_NUM_DATA_T represents number of data types in the
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of DRM framework.
+ */
+#define XDSI_NUM_DATA_T 4
+#define XDSI_VIDEO_MODE_SYNC_PULSE 0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT 0x1
+#define XDSI_VIDEO_MODE_BURST 0x2
+
+#define XDSI_DPHY_CLK_MIN 197000000000UL
+#define XDSI_DPHY_CLK_MAX 203000000000UL
+#define XDSI_DPHY_CLK_REQ 200000000000UL
+
+/**
+ * struct xlnx_dsi - Core configuration DSI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock and AXI Lite clock
+ */
+struct xlnx_dsi {
+ struct drm_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+ void __iomem *iomem;
+ u32 lanes;
+ u32 mode_flags;
+ enum mipi_dsi_pixel_format format;
+ struct videomode vm;
+ u32 mul_factor;
+ struct drm_property *eotp_prop;
+ struct drm_property *bllp_mode_prop;
+ struct drm_property *bllp_type_prop;
+ struct drm_property *video_mode_prop;
+ struct drm_property *bllp_burst_time_prop;
+ struct drm_property *cmd_queue_prop;
+ bool eotp_prop_val;
+ bool bllp_mode_prop_val;
+ bool bllp_type_prop_val;
+ u32 video_mode_prop_val;
+ u32 bllp_burst_time_prop_val;
+ u32 cmd_queue_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ struct clk *video_aclk;
+ struct clk *dphy_clk_200M;
+};
+
+#define host_to_dsi(host) container_of(host, struct xlnx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xlnx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xlnx_dsi, encoder)
+
+static inline void xlnx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_dsi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * given from user application.
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure having drm_property parameters
+ * configured from user application and writes them into DSI IP registers.
+ */
+static void xlnx_dsi_set_config_parameters(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+ reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+ reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+ reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+ /*
+ * Configure the burst time if video mode is burst.
+ * HSA of TIME1 register is ignored in this mode.
+ */
+ if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+ reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+ dev_dbg(dsi->dev, "PCR register value is = %x\n",
+ xlnx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xlnx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xlnx_dsi_set_display_mode(struct xlnx_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg, video_mode;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_PCR);
+ video_mode = (reg & XDSI_PCR_VIDEOMODE_MASK) >>
+ XDSI_PCR_VIDEOMODE_SHIFT;
+
+	/* configure the HSA value only in non-burst sync pulse video mode */
+ if (!video_mode &&
+ (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+ reg = XDSI_TIME1_HSA(vm->hsync_len);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+ XDSI_TIME4_VBP(vm->vback_porch) |
+ XDSI_TIME4_VSA(vm->vsync_len);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+ reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+ XDSI_TIME3_HBP(vm->hback_porch);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+ dev_dbg(dsi->dev, "mul factor for parsed datatype is = %d\n",
+ (dsi->mul_factor) / 100);
+ /*
+ * The HACT parameter received from panel timing values should be
+ * divisible by 4. The reason for this is, the word count given as
+ * input to DSI controller is HACT * mul_factor. The mul_factor is
+ * 3, 2.25, 2.25, 2 respectively for RGB888, RGB666_L, RGB666_P and
+ * RGB565.
+ * e.g. for RGB666_L color format and 1080p, the word count is
+ * 1920*2.25 = 4320 which is divisible by 4 and it is a valid input
+ * to DSI controller. Based on this 2.25 mul factor, we come up with
+ * the division factor of (XDSI_HACT_MULTIPLIER) as 4 for checking
+ */
+ if ((vm->hactive & XDSI_HACT_MULTIPLIER) != 0)
+ dev_warn(dsi->dev, "Incorrect HACT will be programmed\n");
+
+ reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+ XDSI_TIME2_VACT(vm->vactive);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xlnx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and sets the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_enable(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg |= XDSI_CCR_COREENB;
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xlnx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and clears the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_disable(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg &= ~XDSI_CCR_COREENB;
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+/**
+ * xlnx_dsi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer to the Xilinx DSI connector
+ * @state: DRM connector state
+ * @prop: pointer to the drm_property structure
+ * @val: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from a user
+ * application and updates the corresponding DSI structure property
+ * variables. These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int xlnx_dsi_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *prop, u64 val)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+ prop->name, val);
+
+ if (prop == dsi->eotp_prop)
+ dsi->eotp_prop_val = !!val;
+ else if (prop == dsi->bllp_mode_prop)
+ dsi->bllp_mode_prop_val = !!val;
+ else if (prop == dsi->bllp_type_prop)
+ dsi->bllp_type_prop_val = !!val;
+ else if (prop == dsi->video_mode_prop)
+ dsi->video_mode_prop_val = (unsigned int)val;
+ else if (prop == dsi->bllp_burst_time_prop)
+ dsi->bllp_burst_time_prop_val = (unsigned int)val;
+ else if (prop == dsi->cmd_queue_prop)
+ dsi->cmd_queue_prop_val = (unsigned int)val;
+ else if (prop == dsi->height_out)
+ dsi->height_out_prop_val = (u32)val;
+ else if (prop == dsi->width_out)
+ dsi->width_out_prop_val = (u32)val;
+ else if (prop == dsi->in_fmt)
+ dsi->in_fmt_prop_val = (u32)val;
+ else if (prop == dsi->out_fmt)
+ dsi->out_fmt_prop_val = (u32)val;
+ else
+ return -EINVAL;
+
+ xlnx_dsi_set_config_parameters(dsi);
+
+ return 0;
+}
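+
+/*
+ * Illustrative userspace sketch (not part of this driver): these
+ * properties reach xlnx_dsi_atomic_set_property() through the generic
+ * DRM property ioctl, e.g. via libdrm. connector_id and eotp_prop_id
+ * are placeholders that would be discovered at runtime with
+ * drmModeGetConnector() and drmModeGetProperty().
+ *
+ * drmModeObjectSetProperty(drm_fd, connector_id,
+ *                          DRM_MODE_OBJECT_CONNECTOR,
+ *                          eotp_prop_id, 1);
+ */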
+
+static int
+xlnx_dsi_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *prop, uint64_t *val)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (prop == dsi->eotp_prop)
+ *val = dsi->eotp_prop_val;
+ else if (prop == dsi->bllp_mode_prop)
+ *val = dsi->bllp_mode_prop_val;
+ else if (prop == dsi->bllp_type_prop)
+ *val = dsi->bllp_type_prop_val;
+ else if (prop == dsi->video_mode_prop)
+ *val = dsi->video_mode_prop_val;
+ else if (prop == dsi->bllp_burst_time_prop)
+ *val = dsi->bllp_burst_time_prop_val;
+ else if (prop == dsi->cmd_queue_prop)
+ *val = dsi->cmd_queue_prop_val;
+ else if (prop == dsi->height_out)
+ *val = dsi->height_out_prop_val;
+ else if (prop == dsi->width_out)
+ *val = dsi->width_out_prop_val;
+ else if (prop == dsi->in_fmt)
+ *val = dsi->in_fmt_prop_val;
+ else if (prop == dsi->out_fmt)
+ *val = dsi->out_fmt_prop_val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int xlnx_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ u32 panel_lanes;
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+
+ panel_lanes = device->lanes;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (panel_lanes != dsi->lanes) {
+ dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+ panel_lanes, dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (dsi->lanes > 4 || dsi->lanes < 1) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (device->format != dsi->format) {
+ dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+ device->format, dsi->format);
+ return -EINVAL;
+ }
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static int xlnx_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops xlnx_dsi_ops = {
+ .attach = xlnx_dsi_host_attach,
+ .detach = xlnx_dsi_host_detach,
+};
+
+static int xlnx_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+ int ret;
+
+ dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0) {
+ dev_err(dsi->dev, "DRM panel not found\n");
+ return ret;
+ }
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ drm_panel_unprepare(dsi->panel);
+ dev_err(dsi->dev, "DRM panel not enabled\n");
+ return ret;
+ }
+ break;
+ default:
+ drm_panel_disable(dsi->panel);
+ drm_panel_unprepare(dsi->panel);
+ break;
+ }
+
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+static enum drm_connector_status
+xlnx_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel)
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ } else if (!dsi->panel_node) {
+ xlnx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+static void xlnx_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_dsi_connector_funcs = {
+ .dpms = xlnx_dsi_connector_dpms,
+ .detect = xlnx_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_dsi_connector_destroy,
+ .atomic_set_property = xlnx_dsi_atomic_set_property,
+ .atomic_get_property = xlnx_dsi_atomic_get_property,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int xlnx_dsi_get_modes(struct drm_connector *connector)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+xlnx_dsi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xlnx_dsi_connector_helper_funcs = {
+ .get_modes = xlnx_dsi_get_modes,
+ .best_encoder = xlnx_dsi_best_encoder,
+};
+
+/**
+ * xlnx_dsi_connector_create_property - create DSI connector properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the xilinx DSI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void xlnx_dsi_connector_create_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ dsi->eotp_prop = drm_property_create_bool(dev, 0, "eotp");
+ dsi->video_mode_prop = drm_property_create_range(dev, 0, "video_mode",
+ 0, 2);
+ dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+ dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+ dsi->bllp_burst_time_prop =
+ drm_property_create_range(dev, 0, "bllp_burst_time", 0, 0xFFFF);
+ dsi->cmd_queue_prop = drm_property_create_range(dev, 0, "cmd_queue", 0,
+ 0xffffff);
+ dsi->height_out = drm_property_create_range(dev, 0, "height_out",
+ 2, 4096);
+ dsi->width_out = drm_property_create_range(dev, 0, "width_out",
+ 2, 4096);
+ dsi->in_fmt = drm_property_create_range(dev, 0, "in_fmt", 0, 16384);
+ dsi->out_fmt = drm_property_create_range(dev, 0, "out_fmt", 0, 16384);
+}
+
+/**
+ * xlnx_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ */
+static void xlnx_dsi_connector_attach_property(struct drm_connector *connector)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+ struct drm_mode_object *obj = &connector->base;
+
+ if (dsi->eotp_prop)
+ drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+ if (dsi->video_mode_prop)
+ drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+ if (dsi->bllp_burst_time_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->bllp_burst_time_prop, 0);
+
+ if (dsi->bllp_mode_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->bllp_mode_prop, 0);
+
+ if (dsi->bllp_type_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->bllp_type_prop, 0);
+
+ if (dsi->cmd_queue_prop)
+ drm_object_attach_property(&connector->base,
+ dsi->cmd_queue_prop, 0);
+
+ if (dsi->height_out)
+ drm_object_attach_property(obj, dsi->height_out, 0);
+
+ if (dsi->width_out)
+ drm_object_attach_property(obj, dsi->width_out, 0);
+
+ if (dsi->in_fmt)
+ drm_object_attach_property(obj, dsi->in_fmt, 0);
+
+ if (dsi->out_fmt)
+ drm_object_attach_property(obj, dsi->out_fmt, 0);
+}
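+
+/*
+ * Illustrative only: once created and attached, these properties appear
+ * in the connector's property list and can be exercised with the libdrm
+ * modetest utility, for example (the connector id 38 is a placeholder):
+ *
+ * modetest -M xlnx -w 38:video_mode:2 -w 38:bllp_burst_time:0x800
+ */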
+
+static int xlnx_dsi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_dsi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_dsi_connector_create_property(connector);
+ xlnx_dsi_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_dsi_atomic_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: Pointer to drm core crtc state
+ * @connector_state: DSI connector drm state
+ *
+ * This function derives the DSI IP timing parameters from the timing
+ * values given in the attached panel driver.
+ */
+static void
+xlnx_dsi_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = &crtc_state->adjusted_mode;
+
+ /* Set bridge input and output parameters */
+ xlnx_bridge_set_input(dsi->bridge, m->hdisplay, m->vdisplay,
+ dsi->in_fmt_prop_val);
+ xlnx_bridge_set_output(dsi->bridge, dsi->width_out_prop_val,
+ dsi->height_out_prop_val,
+ dsi->out_fmt_prop_val);
+ xlnx_bridge_enable(dsi->bridge);
+
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
+ xlnx_dsi_set_display_mode(dsi);
+}
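+
+/*
+ * Worked example with the standard CEA 1080p60 mode: hdisplay = 1920,
+ * hsync_start = 2008, hsync_end = 2052, htotal = 2200 yield
+ * hfront_porch = 88, hsync_len = 44 and hback_porch = 148; vdisplay =
+ * 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125 yield
+ * vfront_porch = 4, vsync_len = 5 and vback_porch = 36.
+ */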
+
+static void xlnx_dsi_disable(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+ if (dsi->bridge)
+ xlnx_bridge_disable(dsi->bridge);
+
+ xlnx_dsi_set_display_disable(dsi);
+}
+
+static void xlnx_dsi_enable(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+ xlnx_dsi_set_display_enable(dsi);
+}
+
+static const struct drm_encoder_helper_funcs xlnx_dsi_encoder_helper_funcs = {
+ .atomic_mode_set = xlnx_dsi_atomic_mode_set,
+ .enable = xlnx_dsi_enable,
+ .disable = xlnx_dsi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xlnx_dsi_parse_dt(struct xlnx_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 datatype;
+ static const int xdsi_mul_fact[XDSI_NUM_DATA_T] = {300, 225, 225, 200};
+
+ dsi->dphy_clk_200M = devm_clk_get(dev, "dphy_clk_200M");
+ if (IS_ERR(dsi->dphy_clk_200M)) {
+ ret = PTR_ERR(dsi->dphy_clk_200M);
+ dev_err(dev, "failed to get dphy_clk_200M %d\n", ret);
+ return ret;
+ }
+
+ dsi->video_aclk = devm_clk_get(dev, "s_axis_aclk");
+ if (IS_ERR(dsi->video_aclk)) {
+ ret = PTR_ERR(dsi->video_aclk);
+ dev_err(dev, "failed to get video_clk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Used as a multiplication factor for HACT based on used
+ * DSI data type.
+ *
+ * e.g. for RGB666_L datatype and 1920x1080 resolution,
+ * the Hact (WC) would be as follows -
+ * 1920 pixels * 18 bits per pixel / 8 bits per byte
+ * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+ *
+ * Data Type - Multiplication factor
+ * RGB888 - 3
+ * RGB666_L - 2.25
+ * RGB666_P - 2.25
+ * RGB565 - 2
+ *
+ * Since the multiplication factor may be a fractional number,
+ * it is stored scaled by 100.
+ */
+ ret = of_property_read_u32(node, "xlnx,dsi-num-lanes", &dsi->lanes);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+ return ret;
+ }
+ if (dsi->lanes > 4 || dsi->lanes < 1) {
+ dev_err(dsi->dev, "%d lanes : invalid lanes\n", dsi->lanes);
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+ return ret;
+ }
+ if (datatype > MIPI_DSI_FMT_RGB565) {
+ dev_err(dsi->dev, "Invalid xlnx,dsi-data-type value\n");
+ return -EINVAL;
+ }
+ dsi->format = datatype;
+ dsi->mul_factor = xdsi_mul_fact[datatype];
+ dev_dbg(dsi->dev, "DSI controller num lanes = %d\n", dsi->lanes);
+ dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+
+ return 0;
+}
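+
+/*
+ * Illustrative device tree node consumed by xlnx_dsi_parse_dt(); the
+ * address and clock phandles are placeholders and required ports are
+ * omitted, so refer to the xlnx,dsi binding document for the
+ * authoritative description:
+ *
+ * mipi_dsi_tx@80000000 {
+ *	compatible = "xlnx,dsi";
+ *	reg = <0x0 0x80000000 0x0 0x10000>;
+ *	xlnx,dsi-num-lanes = <4>;
+ *	xlnx,dsi-data-type = <0>;	// MIPI_DSI_FMT_RGB888
+ *	clock-names = "dphy_clk_200M", "s_axis_aclk";
+ *	clocks = <&misc_clk_0>, <&misc_clk_1>;
+ * };
+ */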
+
+static int xlnx_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: The possible CRTCs are 1 now as per current implementation of
+ * DSI tx drivers. DRM framework can support more than one CRTCs and
+ * DSI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+ drm_encoder_init(drm_dev, encoder, &xlnx_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ drm_encoder_helper_add(encoder, &xlnx_dsi_encoder_helper_funcs);
+ ret = xlnx_dsi_create_connector(encoder);
+ if (ret) {
+ dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ xlnx_dsi_connector_destroy(&dsi->connector);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ return 0;
+}
+
+static void xlnx_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+
+ xlnx_dsi_disable(&dsi->encoder);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ xlnx_bridge_disable(dsi->bridge);
+}
+
+static const struct component_ops xlnx_dsi_component_ops = {
+ .bind = xlnx_dsi_bind,
+ .unbind = xlnx_dsi_unbind,
+};
+
+static int xlnx_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xlnx_dsi *dsi;
+ struct device_node *vpss_node;
+ int ret;
+ unsigned long rate;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dsi_host.ops = &xlnx_dsi_ops;
+ dsi->dsi_host.dev = dev;
+ dsi->dev = dev;
+
+ ret = xlnx_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->iomem))
+ return PTR_ERR(dsi->iomem);
+
+ platform_set_drvdata(pdev, dsi);
+
+ /* Bridge support */
+ vpss_node = of_parse_phandle(dsi->dev->of_node, "xlnx,vpss", 0);
+ if (vpss_node) {
+ dsi->bridge = of_xlnx_bridge_get(vpss_node);
+ if (!dsi->bridge) {
+ dev_info(dsi->dev, "Didn't get bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ret = clk_set_rate(dsi->dphy_clk_200M, XDSI_DPHY_CLK_REQ);
+ if (ret) {
+ dev_err(dev, "failed to set dphy clk rate %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->dphy_clk_200M);
+ if (ret) {
+ dev_err(dev, "failed to enable dphy clk %d\n", ret);
+ return ret;
+ }
+
+ rate = clk_get_rate(dsi->dphy_clk_200M);
+ if (rate < XDSI_DPHY_CLK_MIN || rate > XDSI_DPHY_CLK_MAX) {
+ dev_err(dev, "Error DPHY clock = %lu\n", rate);
+ ret = -EINVAL;
+ goto err_disable_dphy_clk;
+ }
+
+ ret = clk_prepare_enable(dsi->video_aclk);
+ if (ret) {
+ dev_err(dev, "failed to enable video clk %d\n", ret);
+ goto err_disable_dphy_clk;
+ }
+
+ ret = component_add(dev, &xlnx_dsi_component_ops);
+ if (ret < 0)
+ goto err_disable_video_clk;
+
+ return ret;
+
+err_disable_video_clk:
+ clk_disable_unprepare(dsi->video_aclk);
+err_disable_dphy_clk:
+ clk_disable_unprepare(dsi->dphy_clk_200M);
+ return ret;
+}
+
+static int xlnx_dsi_remove(struct platform_device *pdev)
+{
+ struct xlnx_dsi *dsi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &xlnx_dsi_component_ops);
+ clk_disable_unprepare(dsi->video_aclk);
+ clk_disable_unprepare(dsi->dphy_clk_200M);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_dsi_of_match[] = {
+ { .compatible = "xlnx,dsi"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = xlnx_dsi_probe,
+ .remove = xlnx_dsi_remove,
+ .driver = {
+ .name = "xlnx-dsi",
+ .of_match_table = xlnx_dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.c b/drivers/gpu/drm/xlnx/xlnx_fb.c
new file mode 100644
index 000000000000..4ef367e7ca4e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+
+#define XLNX_MAX_PLANES 4
+
+struct xlnx_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct drm_framebuffer *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+};
+
+static inline struct xlnx_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xlnx_fbdev, fb_helper);
+}
+
+static struct drm_framebuffer_funcs xlnx_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+static int
+xlnx_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ unsigned int i;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set;
+ struct drm_crtc *crtc;
+
+ mode_set = &fb_helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
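+
+/*
+ * Illustrative userspace sketch: through the fbdev node this helper
+ * registers, a client can block until the next vertical blank with the
+ * standard fbdev ioctl (the argument is ignored by this handler, which
+ * waits on every bound CRTC):
+ *
+ * __u32 crtc = 0;
+ * ioctl(fb_fd, FBIO_WAITFORVSYNC, &crtc);
+ */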
+
+static struct fb_ops xlnx_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xlnx_fb_ioctl,
+};
+
+/**
+ * xlnx_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @size: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xlnx_fbdev_create(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *size)
+{
+ struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ u32 format;
+ const struct drm_format_info *info;
+ size_t bytes;
+ int ret;
+
+ dev_dbg(drm->dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ size->surface_width, size->surface_height, size->surface_bpp);
+
+ size->surface_height *= fbdev->vres_mult;
+ bytes_per_pixel = DIV_ROUND_UP(size->surface_bpp, 8);
+ bytes = ALIGN(size->surface_width * bytes_per_pixel, fbdev->align);
+ bytes *= size->surface_height;
+
+ obj = drm_gem_cma_create(drm, bytes);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ dev_err(drm->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ /* Override the depth given by fb helper with current format value */
+ format = xlnx_get_format(drm);
+ info = drm_format_info(format);
+ if (size->surface_bpp == info->cpp[0] * 8)
+ size->surface_depth = info->depth;
+
+ fbdev->fb = drm_gem_fbdev_fb_create(drm, size, fbdev->align, &obj->base,
+ &xlnx_fb_funcs);
+ if (IS_ERR(fbdev->fb)) {
+ dev_err(drm->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ fb = fbdev->fb;
+ fb_helper->fb = fb;
+ fb_helper->fbdev = fbi;
+ fbi->par = fb_helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xlnx_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(drm->dev, "Failed to allocate color map.\n");
+ goto err_fb_destroy;
+ }
+
+ drm_fb_helper_fill_info(fbi, fb_helper, size);
+ fbi->var.yres = fb->height / fbdev->vres_mult;
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = bytes;
+ fbi->fix.smem_len = bytes;
+
+ return 0;
+
+err_fb_destroy:
+ drm_framebuffer_unregister_private(fb);
+ drm_gem_fb_destroy(fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
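+
+/*
+ * Sizing example (assuming align = 8 and vres_mult = 2 for a
+ * double-height virtual framebuffer): a 1920x1080 surface at 32 bpp is
+ * stretched to 1920x2160, bytes_per_pixel = 4, and the CMA allocation is
+ * ALIGN(1920 * 4, 8) * 2160 = 7680 * 2160 = 16588800 bytes.
+ */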
+
+static struct drm_fb_helper_funcs xlnx_fb_helper_funcs = {
+ .fb_probe = xlnx_fbdev_create,
+};
+
+/**
+ * xlnx_fb_init - Allocate and initializes the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or a ERR_PTR.
+ */
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult)
+{
+ struct xlnx_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return ERR_PTR(-ENOMEM);
+
+ fbdev->vres_mult = vres_mult;
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xlnx_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+ return ERR_PTR(ret);
+}
+
+/**
+ * xlnx_fbdev_defio_fini - Free the defio fb
+ * @fbi: fb_info struct
+ *
+ * This function is based on drm_fbdev_cma_defio_fini().
+ */
+static void xlnx_fbdev_defio_fini(struct fb_info *fbi)
+{
+ if (!fbi->fbdefio)
+ return;
+
+ fb_deferred_io_cleanup(fbi);
+ kfree(fbi->fbdefio);
+ kfree(fbi->fbops);
+}
+
+/**
+ * xlnx_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+
+ drm_fb_helper_unregister_fbi(&fbdev->fb_helper);
+ if (fbdev->fb_helper.fbdev)
+ xlnx_fbdev_defio_fini(fbdev->fb_helper.fbdev);
+
+ if (fbdev->fb_helper.fb)
+ drm_framebuffer_remove(fbdev->fb_helper.fb);
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xlnx_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer with xlnx_fb_funcs for the given
+ * @mode_cmd. It is intended to be used as the fb_create callback of
+ * drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or
+ * ERR_PTR from drm_gem_fb_create_with_funcs().
+ */
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
+ &xlnx_fb_funcs);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.h b/drivers/gpu/drm/xlnx/xlnx_fb.h
new file mode 100644
index 000000000000..6efc985f2fb3
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_FB_H_
+#define _XLNX_FB_H_
+
+struct drm_fb_helper;
+
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+ unsigned int max_conn_count, unsigned int align,
+ unsigned int vres_mult);
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper);
+
+#endif /* _XLNX_FB_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.c b/drivers/gpu/drm/xlnx/xlnx_gem.c
new file mode 100644
index 000000000000..4a5d533ec72e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xlnx_drv.h"
+#include "xlnx_gem.h"
+
+/**
+ * xlnx_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function implements the dumb_create callback of the drm_driver
+ * struct. It simply wraps drm_gem_cma_dumb_create_internal(), first
+ * aligning the pitch to the value retrieved from the device.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create_internal()
+ */
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int align = xlnx_get_align(drm);
+
+ if (!args->pitch || !IS_ALIGNED(args->pitch, align))
+ args->pitch = ALIGN(pitch, align);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
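+
+/*
+ * Worked example, assuming xlnx_get_align() returns 16: a dumb buffer
+ * request of width 1918 at 24 bpp computes pitch
+ * DIV_ROUND_UP(1918 * 24, 8) = 5754, which is not 16-byte aligned and
+ * is therefore rounded up to ALIGN(5754, 16) = 5760.
+ */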
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.h b/drivers/gpu/drm/xlnx/xlnx_gem.h
new file mode 100644
index 000000000000..f380de916379
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_GEM_H_
+#define _XLNX_GEM_H_
+
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _XLNX_GEM_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_mixer.c b/drivers/gpu/drm/xlnx/xlnx_mixer.c
new file mode 100644
index 000000000000..2daa4fda078f
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_mixer.c
@@ -0,0 +1,2821 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx logicore video mixer driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ * : Jeffrey Mouroux <jmouroux@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/dmaengine.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/**************************** Register Data **********************************/
+#define XVMIX_AP_CTRL 0x00000
+#define XVMIX_GIE 0x00004
+#define XVMIX_IER 0x00008
+#define XVMIX_ISR 0x0000c
+#define XVMIX_WIDTH_DATA 0x00010
+#define XVMIX_HEIGHT_DATA 0x00018
+#define XVMIX_BACKGROUND_Y_R_DATA 0x00028
+#define XVMIX_BACKGROUND_U_G_DATA 0x00030
+#define XVMIX_BACKGROUND_V_B_DATA 0x00038
+#define XVMIX_LAYERENABLE_DATA 0x00040
+#define XVMIX_LAYERALPHA_0_DATA 0x00100
+#define XVMIX_LAYERSTARTX_0_DATA 0x00108
+#define XVMIX_LAYERSTARTY_0_DATA 0x00110
+#define XVMIX_LAYERWIDTH_0_DATA 0x00118
+#define XVMIX_LAYERSTRIDE_0_DATA 0x00120
+#define XVMIX_LAYERHEIGHT_0_DATA 0x00128
+#define XVMIX_LAYERSCALE_0_DATA 0x00130
+#define XVMIX_LAYERVIDEOFORMAT_0_DATA 0x00138
+#define XVMIX_LAYER1_BUF1_V_DATA 0x00240
+#define XVMIX_LAYER1_BUF2_V_DATA 0x0024c
+#define XVMIX_LOGOSTARTX_DATA 0x01000
+#define XVMIX_LOGOSTARTY_DATA 0x01008
+#define XVMIX_LOGOWIDTH_DATA 0x01010
+#define XVMIX_LOGOHEIGHT_DATA 0x01018
+#define XVMIX_LOGOSCALEFACTOR_DATA 0x01020
+#define XVMIX_LOGOALPHA_DATA 0x01028
+#define XVMIX_LOGOCLRKEYMIN_R_DATA 0x01030
+#define XVMIX_LOGOCLRKEYMIN_G_DATA 0x01038
+#define XVMIX_LOGOCLRKEYMIN_B_DATA 0x01040
+#define XVMIX_LOGOCLRKEYMAX_R_DATA 0x01048
+#define XVMIX_LOGOCLRKEYMAX_G_DATA 0x01050
+#define XVMIX_LOGOCLRKEYMAX_B_DATA 0x01058
+#define XVMIX_LOGOR_V_BASE 0x10000
+#define XVMIX_LOGOR_V_HIGH 0x10fff
+#define XVMIX_LOGOG_V_BASE 0x20000
+#define XVMIX_LOGOG_V_HIGH 0x20fff
+#define XVMIX_LOGOB_V_BASE 0x30000
+#define XVMIX_LOGOB_V_HIGH 0x30fff
+#define XVMIX_LOGOA_V_BASE 0x40000
+#define XVMIX_LOGOA_V_HIGH 0x40fff
+
+/************************** Constant Definitions *****************************/
+#define XVMIX_LOGO_OFFSET 0x1000
+#define XVMIX_MASK_DISABLE_ALL_LAYERS 0x0
+#define XVMIX_REG_OFFSET 0x100
+#define XVMIX_MASTER_LAYER_IDX 0x0
+#define XVMIX_LOGO_LAYER_IDX 0x1
+#define XVMIX_DISP_MAX_WIDTH 4096
+#define XVMIX_DISP_MAX_HEIGHT 2160
+#define XVMIX_MAX_OVERLAY_LAYERS 16
+#define XVMIX_MAX_BPC 16
+#define XVMIX_ALPHA_MIN 0
+#define XVMIX_ALPHA_MAX 256
+#define XVMIX_LAYER_WIDTH_MIN 64
+#define XVMIX_LAYER_HEIGHT_MIN 64
+#define XVMIX_LOGO_LAYER_WIDTH_MIN 32
+#define XVMIX_LOGO_LAYER_HEIGHT_MIN 32
+#define XVMIX_LOGO_LAYER_WIDTH_MAX 256
+#define XVMIX_LOGO_LAYER_HEIGHT_MAX 256
+#define XVMIX_IRQ_DONE_MASK BIT(0)
+#define XVMIX_GIE_EN_MASK BIT(0)
+#define XVMIX_AP_EN_MASK BIT(0)
+#define XVMIX_AP_RST_MASK BIT(7)
+#define XVMIX_MAX_NUM_SUB_PLANES 4
+#define XVMIX_SCALE_FACTOR_1X 0
+#define XVMIX_SCALE_FACTOR_2X 1
+#define XVMIX_SCALE_FACTOR_4X 2
+#define XVMIX_SCALE_FACTOR_INVALID 3
+#define XVMIX_BASE_ALIGN 8
+
+/*************************** STATIC DATA ************************************/
+static const u32 color_table[] = {
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_Y8,
+ DRM_FORMAT_Y10,
+ DRM_FORMAT_XVUY2101010,
+ DRM_FORMAT_VUY888,
+ DRM_FORMAT_XVUY8888,
+ DRM_FORMAT_XV15,
+ DRM_FORMAT_XV20,
+};
+
+/*********************** Inline Functions/Macros *****************************/
+#define to_mixer_hw(p) (&((p)->mixer->mixer_hw))
+#define to_xlnx_crtc(x) container_of(x, struct xlnx_crtc, crtc)
+#define to_xlnx_plane(x) container_of(x, struct xlnx_mix_plane, base)
+#define to_xlnx_mixer(x) container_of(x, struct xlnx_mix, crtc)
+
+/**
+ * enum xlnx_mix_layer_id - Describes the layer by index to be acted upon
+ * @XVMIX_LAYER_MASTER: Master layer
+ * @XVMIX_LAYER_1: Layer 1
+ * @XVMIX_LAYER_2: Layer 2
+ * @XVMIX_LAYER_3: Layer 3
+ * @XVMIX_LAYER_4: Layer 4
+ * @XVMIX_LAYER_5: Layer 5
+ * @XVMIX_LAYER_6: Layer 6
+ * @XVMIX_LAYER_7: Layer 7
+ * @XVMIX_LAYER_8: Layer 8
+ * @XVMIX_LAYER_9: Layer 9
+ * @XVMIX_LAYER_10: Layer 10
+ * @XVMIX_LAYER_11: Layer 11
+ * @XVMIX_LAYER_12: Layer 12
+ * @XVMIX_LAYER_13: Layer 13
+ * @XVMIX_LAYER_14: Layer 14
+ * @XVMIX_LAYER_15: Layer 15
+ * @XVMIX_LAYER_16: Layer 16
+ */
+enum xlnx_mix_layer_id {
+ XVMIX_LAYER_MASTER = 0,
+ XVMIX_LAYER_1,
+ XVMIX_LAYER_2,
+ XVMIX_LAYER_3,
+ XVMIX_LAYER_4,
+ XVMIX_LAYER_5,
+ XVMIX_LAYER_6,
+ XVMIX_LAYER_7,
+ XVMIX_LAYER_8,
+ XVMIX_LAYER_9,
+ XVMIX_LAYER_10,
+ XVMIX_LAYER_11,
+ XVMIX_LAYER_12,
+ XVMIX_LAYER_13,
+ XVMIX_LAYER_14,
+ XVMIX_LAYER_15,
+ XVMIX_LAYER_16
+};
+
+/**
+ * struct xlnx_mix_layer_data - Describes the hardware configuration of a given
+ * mixer layer
+ * @hw_config: struct specifying the IP hardware constraints for this layer
+ * @vid_fmt: DRM format for this layer
+ * @can_alpha: Indicates that layer alpha is enabled for this layer
+ * @can_scale: Indicates that layer scaling is enabled for this layer
+ * @is_streaming: Indicates layer is not using mixer DMA but streaming from
+ * external DMA
+ * @max_width: Max possible pixel width
+ * @max_height: Max possible pixel height
+ * @min_width: Min possible pixel width
+ * @min_height: Min possible pixel height
+ * @layer_regs: struct containing current cached register values
+ * @buff_addr: Current physical address of image buffer
+ * @x_pos: Current CRTC x offset
+ * @y_pos: Current CRTC y offset
+ * @width: Current width in pixels
+ * @height: Current height in pixels
+ * @stride: Current stride (when Mixer is performing DMA)
+ * @alpha: Current alpha setting
+ * @is_active: Logical flag indicating layer in use. If false, calls to
+ * enable layer will be ignored.
+ * @scale_fact: Current scaling factor applied to layer
+ * @id: The logical layer id identifies which layer this struct describes
+ * (e.g. 0 = master, 1-15 = overlay).
+ *
+ * All mixer layers are represented by an instance of this struct:
+ * output streaming, overlay, logo.
+ * Current layer-specific register state is stored in the layer_regs struct.
+ * The hardware configuration is stored in struct hw_config.
+ *
+ * Note:
+ * Some properties of the logo layer are unique and not described in this
+ * struct. Those properties are part of the xlnx_mix struct as global
+ * properties.
+ */
+struct xlnx_mix_layer_data {
+ struct {
+ u32 vid_fmt;
+ bool can_alpha;
+ bool can_scale;
+ bool is_streaming;
+ u32 max_width;
+ u32 max_height;
+ u32 min_width;
+ u32 min_height;
+ } hw_config;
+
+ struct {
+ u64 buff_addr1;
+ u64 buff_addr2;
+ u32 x_pos;
+ u32 y_pos;
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 alpha;
+ bool is_active;
+ u32 scale_fact;
+ } layer_regs;
+
+ enum xlnx_mix_layer_id id;
+};
+
+/**
+ * struct xlnx_mix_hw - Describes a mixer IP block instance within the design
+ * @base: Base physical address of Mixer IP in memory map
+ * @logo_layer_en: Indicates logo layer is enabled in hardware
+ * @logo_pixel_alpha_enabled: Indicates that per-pixel alpha supported for logo
+ * layer
+ * @max_layer_width: Max possible width for any layer on this Mixer
+ * @max_layer_height: Max possible height for any layer on this Mixer
+ * @max_logo_layer_width: Max possible width of the logo layer
+ * @max_logo_layer_height: Max possible height of the logo layer
+ * @num_layers: Max number of layers (excl: logo)
+ * @bg_layer_bpc: Bits per component for the background streaming layer
+ * @dma_addr_size: dma address size in bits
+ * @ppc: Pixels per component
+ * @irq: Interrupt request number assigned
+ * @bg_color: Current RGB color value for internal background color generator
+ * @layer_data: Array of layer data
+ * @layer_cnt: Layer data array count
+ * @max_layers: Maximum number of layers supported by hardware
+ * @logo_layer_id: Index of logo layer
+ * @logo_en_mask: Mask used to enable logo layer
+ * @enable_all_mask: Mask used to enable all layers
+ * @reset_gpio: GPIO line used to reset IP between modesetting operations
+ * @intrpt_handler_fn: Interrupt handler function called when frame is completed
+ * @intrpt_data: Data pointer passed to interrupt handler
+ *
+ * Used as the primary data structure for many L2 driver functions. Logo layer
+ * data, if enabled within the IP, is described in this structure. All other
+ * layers are described by an instance of xlnx_mix_layer_data referenced by this
+ * struct.
+ *
+ */
+struct xlnx_mix_hw {
+ void __iomem *base;
+ bool logo_layer_en;
+ bool logo_pixel_alpha_enabled;
+ u32 max_layer_width;
+ u32 max_layer_height;
+ u32 max_logo_layer_width;
+ u32 max_logo_layer_height;
+ u32 num_layers;
+ u32 bg_layer_bpc;
+ u32 dma_addr_size;
+ u32 ppc;
+ int irq;
+ u64 bg_color;
+ struct xlnx_mix_layer_data *layer_data;
+ u32 layer_cnt;
+ u32 max_layers;
+ u32 logo_layer_id;
+ u32 logo_en_mask;
+ u32 enable_all_mask;
+ struct gpio_desc *reset_gpio;
+ void (*intrpt_handler_fn)(void *);
+ void *intrpt_data;
+};
+
+/**
+ * struct xlnx_mix - Container for interfacing DRM driver to mixer
+ * @mixer_hw: Object representing actual hardware state of mixer
+ * @master: Logical master device from xlnx drm
+ * @crtc: Xilinx DRM driver crtc object
+ * @drm_primary_layer: Hardware layer serving as logical DRM primary layer
+ * @hw_master_layer: Base video streaming layer
+ * @hw_logo_layer: Hardware logo layer
+ * @planes: Mixer overlay layers
+ * @num_planes : number of planes
+ * @max_width : maximum width of plane
+ * @max_height : maximum height of plane
+ * @max_cursor_width : maximum cursor width
+ * @max_cursor_height: maximum cursor height
+ * @alpha_prop: Global layer alpha property
+ * @scale_prop: Layer scale property (1x, 2x or 4x)
+ * @bg_color: Background color property for primary layer
+ * @drm: core drm object
+ * @pixel_clock: pixel clock for mixer
+ * @pixel_clock_enabled: pixel clock status
+ * @dpms: mixer drm state
+ * @event: vblank pending event
+ * @vtc_bridge: vtc_bridge structure
+ *
+ * Contains pointers to logical constructions such as the DRM plane manager as
+ * well as pointers to distinguish the mixer layer serving as the DRM "primary"
+ * plane from the actual mixer layer which serves as the background layer in
+ * hardware.
+ *
+ */
+struct xlnx_mix {
+ struct xlnx_mix_hw mixer_hw;
+ struct platform_device *master;
+ struct xlnx_crtc crtc;
+ struct xlnx_mix_plane *drm_primary_layer;
+ struct xlnx_mix_plane *hw_master_layer;
+ struct xlnx_mix_plane *hw_logo_layer;
+ struct xlnx_mix_plane *planes;
+ u32 num_planes;
+ u32 max_width;
+ u32 max_height;
+ u32 max_cursor_width;
+ u32 max_cursor_height;
+ struct drm_property *alpha_prop;
+ struct drm_property *scale_prop;
+ struct drm_property *bg_color;
+ struct drm_device *drm;
+ struct clk *pixel_clock;
+ bool pixel_clock_enabled;
+ int dpms;
+ struct drm_pending_vblank_event *event;
+ struct xlnx_bridge *vtc_bridge;
+};
+
+/**
+ * struct xlnx_mix_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xlnx_mix_plane_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+ bool is_active;
+};
+
+/**
+ * struct xlnx_mix_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @mixer_layer: video mixer hardware layer data instance
+ * @mixer: mixer DRM object
+ * @dma: dma object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @format: pixel format
+ */
+struct xlnx_mix_plane {
+ struct drm_plane base;
+ struct xlnx_mix_layer_data *mixer_layer;
+ struct xlnx_mix *mixer;
+ struct xlnx_mix_plane_dma dma[XVMIX_MAX_NUM_SUB_PLANES];
+ int id;
+ int dpms;
+ u32 format;
+};
+
+static inline void reg_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline void reg_writeq(void __iomem *base, int offset, u64 val)
+{
+ writel(lower_32_bits(val), base + offset);
+ writel(upper_32_bits(val), base + offset + 4);
+}
+
+static inline u32 reg_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_mix_intrpt_enable_done - Enables interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Enables interrupts in the mixer core
+ */
+static void xlnx_mix_intrpt_enable_done(struct xlnx_mix_hw *mixer)
+{
+ u32 curr_val = reg_readl(mixer->base, XVMIX_IER);
+
+ /* Enable Interrupts */
+ reg_writel(mixer->base, XVMIX_IER, curr_val | XVMIX_IRQ_DONE_MASK);
+ reg_writel(mixer->base, XVMIX_GIE, XVMIX_GIE_EN_MASK);
+}
+
+/**
+ * xlnx_mix_intrpt_disable - Disable interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Disables interrupts in the mixer core
+ */
+static void xlnx_mix_intrpt_disable(struct xlnx_mix_hw *mixer)
+{
+ u32 curr_val = reg_readl(mixer->base, XVMIX_IER);
+
+ reg_writel(mixer->base, XVMIX_IER, curr_val & (~XVMIX_IRQ_DONE_MASK));
+ reg_writel(mixer->base, XVMIX_GIE, 0);
+}
+
+/**
+ * xlnx_mix_start - Start the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Starts the core to generate a video frame.
+ */
+static void xlnx_mix_start(struct xlnx_mix_hw *mixer)
+{
+ u32 val;
+
+ val = XVMIX_AP_RST_MASK | XVMIX_AP_EN_MASK;
+ reg_writel(mixer->base, XVMIX_AP_CTRL, val);
+}
+
+/**
+ * xlnx_mix_stop - Stop the mixer core video generator
+ * @mixer: Mixer core instance for which to stop video output
+ *
+ * Stops the core from generating video frames.
+ */
+static void xlnx_mix_stop(struct xlnx_mix_hw *mixer)
+{
+ reg_writel(mixer->base, XVMIX_AP_CTRL, 0);
+}
+
+static inline uint32_t xlnx_mix_get_intr_status(struct xlnx_mix_hw *mixer)
+{
+ return reg_readl(mixer->base, XVMIX_ISR) & XVMIX_IRQ_DONE_MASK;
+}
+
+static inline void xlnx_mix_clear_intr_status(struct xlnx_mix_hw *mixer,
+ uint32_t intr)
+{
+ reg_writel(mixer->base, XVMIX_ISR, intr);
+}
+
+/**
+ * xlnx_mix_get_layer_data - Retrieve current hardware and register
+ * values for a logical video layer
+ * @mixer: Mixer instance to interrogate
+ * @id: Id of layer for which data is requested
+ *
+ * Return:
+ * Structure containing layer-specific data; NULL upon failure
+ */
+static struct xlnx_mix_layer_data *
+xlnx_mix_get_layer_data(struct xlnx_mix_hw *mixer, enum xlnx_mix_layer_id id)
+{
+ u32 i;
+ struct xlnx_mix_layer_data *layer_data;
+
+ for (i = 0; i < mixer->layer_cnt; i++) {
+ layer_data = &mixer->layer_data[i];
+ if (layer_data->id == id)
+ return layer_data;
+ }
+ return NULL;
+}
+
+/**
+ * xlnx_mix_set_active_area - Sets the number of active horizontal and
+ * vertical scan lines for the mixer background layer.
+ * @mixer: Mixer instance for which to set a new viewable area
+ * @hactive: Width of new background image dimension
+ * @vactive: Height of new background image dimension
+ *
+ * Minimum values are 64x64 with maximum values determined by the IP hardware
+ * design.
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_active_area(struct xlnx_mix_hw *mixer,
+ u32 hactive, u32 vactive)
+{
+ struct xlnx_mix_layer_data *ld =
+ xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+
+ if (hactive > ld->hw_config.max_width ||
+ vactive > ld->hw_config.max_height) {
+ DRM_ERROR("Invalid layer dimention\n");
+ return -EINVAL;
+ }
+ /* set resolution */
+ reg_writel(mixer->base, XVMIX_HEIGHT_DATA, vactive);
+ reg_writel(mixer->base, XVMIX_WIDTH_DATA, hactive);
+ ld->layer_regs.width = hactive;
+ ld->layer_regs.height = vactive;
+
+ return 0;
+}
+
+/**
+ * is_window_valid - Validate requested plane dimensions
+ * @mixer: Mixer core instance for which to stop video output
+ * @x_pos: x position requested for start of plane
+ * @y_pos: y position requested for start of plane
+ * @width: width of plane
+ * @height: height of plane
+ * @scale: scale factor of plane
+ *
+ * Validates if the requested window is within the frame boundary
+ *
+ * Return:
+ * true on success, false on failure
+ */
+static bool is_window_valid(struct xlnx_mix_hw *mixer, u32 x_pos, u32 y_pos,
+ u32 width, u32 height, u32 scale)
+{
+ struct xlnx_mix_layer_data *master_layer;
+ int scale_factor[3] = {1, 2, 4};
+
+ master_layer = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+
+ /* Check if window scale factor is set */
+ if (scale < XVMIX_SCALE_FACTOR_INVALID) {
+ width *= scale_factor[scale];
+ height *= scale_factor[scale];
+ }
+
+ /* verify overlay falls within currently active background area */
+ if (((x_pos + width) <= master_layer->layer_regs.width) &&
+ ((y_pos + height) <= master_layer->layer_regs.height))
+ return true;
+
+ DRM_ERROR("Requested plane dimensions can't be set\n");
+ return false;
+}
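+
+/*
+ * Example: with a 1920x1080 active background, a 256x256 overlay at
+ * (1800, 900) is rejected even at 1x scaling because 1800 + 256 = 2056
+ * exceeds the 1920-column active width; the same overlay at (1600, 800)
+ * fits at 1x but fails at 2x scaling, where it occupies 512x512 pixels.
+ */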
+
+/**
+ * xlnx_mix_layer_enable - Enables the requested layers
+ * @mixer: Mixer instance in which to enable a video layer
+ * @id: Logical id (e.g. 16 = logo layer) to enable
+ *
+ * Enables (permits video output for) the layer denoted by id in the IP
+ * core. Layer 0 indicates the background layer and layer 16 the logo
+ * layer. Passing the max layers value will enable all layers.
+ */
+static void xlnx_mix_layer_enable(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 curr_state;
+
+ /* Ensure layer is marked as 'active' by application before
+ * turning on in hardware. In some cases, layer register data
+ * may be written to otherwise inactive layers in lieu of, eventually,
+ * turning them on.
+ */
+ layer_data = xlnx_mix_get_layer_data(mixer, id);
+ if (!layer_data) {
+ DRM_ERROR("Invalid layer id %d\n", id);
+ return;
+ }
+ if (!layer_data->layer_regs.is_active)
+ return; /* for inactive layers silently return */
+
+ /* Check if request is to enable all layers or single layer */
+ if (id == mixer->max_layers) {
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+ mixer->enable_all_mask);
+
+ } else if ((id < mixer->layer_cnt) || ((id == mixer->logo_layer_id) &&
+ mixer->logo_layer_en)) {
+ curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+ if (id == mixer->logo_layer_id)
+ curr_state |= mixer->logo_en_mask;
+ else
+ curr_state |= BIT(id);
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+ } else {
+ DRM_ERROR("Can't enable requested layer %d\n", id);
+ }
+}
+
+/**
+ * xlnx_mix_disp_layer_enable - Enables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to enable
+ *
+ */
+static void xlnx_mix_disp_layer_enable(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct xlnx_mix_layer_data *l_data;
+ u32 id;
+
+ if (!plane)
+ return;
+ mixer_hw = to_mixer_hw(plane);
+ l_data = plane->mixer_layer;
+ id = l_data->id;
+ if (id < XVMIX_LAYER_MASTER || id > mixer_hw->logo_layer_id) {
+ DRM_DEBUG_KMS("Attempt to activate invalid layer: %d\n", id);
+ return;
+ }
+ if (id == XVMIX_LAYER_MASTER && !l_data->hw_config.is_streaming)
+ return;
+
+ xlnx_mix_layer_enable(mixer_hw, id);
+}
+
+/**
+ * xlnx_mix_layer_disable - Disables the requested layer
+ * @mixer: Mixer for which the layer will be disabled
+ * @id: Logical id of the layer to be disabled (0-16)
+ *
+ * Disables the layer denoted by layer_id in the IP core.
+ * Layer 0 will indicate the background layer and layer 16 the logo
+ * layer. Passing the value of max layers will disable all
+ * layers.
+ */
+static void xlnx_mix_layer_disable(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ u32 num_layers, curr_state;
+
+ num_layers = mixer->layer_cnt;
+
+ if (id == mixer->max_layers) {
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+ XVMIX_MASK_DISABLE_ALL_LAYERS);
+ } else if ((id < num_layers) ||
+ ((id == mixer->logo_layer_id) && (mixer->logo_layer_en))) {
+ curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+ if (id == mixer->logo_layer_id)
+ curr_state &= ~(mixer->logo_en_mask);
+ else
+ curr_state &= ~(BIT(id));
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+ } else {
+ DRM_ERROR("Can't disable requested layer %d\n", id);
+ }
+}
+
+/**
+ * xlnx_mix_disp_layer_disable - Disables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to disable
+ *
+ */
+static void xlnx_mix_disp_layer_disable(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ u32 layer_id;
+
+ if (!plane)
+ return;
+ mixer_hw = to_mixer_hw(plane);
+ layer_id = plane->mixer_layer->id;
+ if (layer_id < XVMIX_LAYER_MASTER ||
+ layer_id > mixer_hw->logo_layer_id)
+ return;
+
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+}
+
+static int xlnx_mix_mark_layer_inactive(struct xlnx_mix_plane *plane)
+{
+ if (!plane || !plane->mixer_layer)
+ return -ENODEV;
+
+ plane->mixer_layer->layer_regs.is_active = false;
+
+ return 0;
+}
+
+/* apply mode to plane pipe */
+static void xlnx_mix_plane_commit(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ /* for xlnx video framebuffer dma, if used */
+ xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ struct xlnx_mix_plane_dma *dma = &plane->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt,
+ flags);
+ if (!desc) {
+ DRM_ERROR("failed to prepare DMA descriptor\n");
+ return;
+ }
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+}
+
+static int xlnx_mix_plane_get_max_width(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_width;
+}
+
+static int xlnx_mix_plane_get_max_height(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_height;
+}
+
+static int xlnx_mix_plane_get_max_cursor_width(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_cursor_width;
+}
+
+static int xlnx_mix_plane_get_max_cursor_height(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+
+ return plane->mixer->max_cursor_height;
+}
+
+static int xlnx_mix_crtc_get_max_width(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_width(crtc->crtc.primary);
+}
+
+static int xlnx_mix_crtc_get_max_height(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_height(crtc->crtc.primary);
+}
+
+static unsigned int xlnx_mix_crtc_get_max_cursor_width(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_cursor_width(crtc->crtc.primary);
+}
+
+static unsigned int xlnx_mix_crtc_get_max_cursor_height(struct xlnx_crtc *crtc)
+{
+ return xlnx_mix_plane_get_max_cursor_height(crtc->crtc.primary);
+}
+
+/**
+ * xlnx_mix_crtc_get_format - Get the current device format
+ * @crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_mix_crtc_get_format(struct xlnx_crtc *crtc)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(crtc->crtc.primary);
+
+ return plane->format;
+}
+
+/**
+ * xlnx_mix_crtc_get_align - Get the alignment value for pitch
+ * @crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+static unsigned int xlnx_mix_crtc_get_align(struct xlnx_crtc *crtc)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(crtc->crtc.primary);
+ struct xlnx_mix *m = plane->mixer;
+
+ return XVMIX_BASE_ALIGN * m->mixer_hw.ppc;
+}
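+
+/*
+ * Example: with XVMIX_BASE_ALIGN of 8 bytes and a mixer built for
+ * 2 pixels per clock, DRM framebuffer pitches must be multiples of
+ * 16 bytes.
+ */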
+
+/**
+ * xlnx_mix_attach_plane_prop - Attach mixer-specific drm property to
+ * the given plane
+ * @plane: Xilinx drm plane object to inspect and attach appropriate
+ * properties to
+ *
+ * The linked mixer layer will be inspected to see what capabilities it offers
+ * (e.g. global layer alpha; scaling) and drm property objects that indicate
+ * those capabilities will then be attached and initialized to default values.
+ */
+static void xlnx_mix_attach_plane_prop(struct xlnx_mix_plane *plane)
+{
+ struct drm_mode_object *base = &plane->base.base;
+ struct xlnx_mix *mixer = plane->mixer;
+
+ if (plane->mixer_layer->hw_config.can_scale)
+ drm_object_attach_property(base, mixer->scale_prop,
+ XVMIX_SCALE_FACTOR_1X);
+ if (plane->mixer_layer->hw_config.can_alpha)
+ drm_object_attach_property(base, mixer->alpha_prop,
+ XVMIX_ALPHA_MAX);
+}
+
+static int xlnx_mix_mark_layer_active(struct xlnx_mix_plane *plane)
+{
+ if (!plane->mixer_layer)
+ return -ENODEV;
+ plane->mixer_layer->layer_regs.is_active = true;
+
+ return 0;
+}
+
+static bool xlnx_mix_isfmt_support(u32 format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(color_table); i++) {
+ if (format == color_table[i])
+ return true;
+ }
+ return false;
+}
+
+/*************** DISPLAY ************/
+
+/**
+ * xlnx_mix_get_layer_scaling - Get layer scaling factor
+ * @mixer: Mixer instance from which to retrieve the layer scaling factor
+ * @id: Plane id
+ *
+ * Applicable only for overlay layers
+ *
+ * Return:
+ * scaling factor of the specified layer
+ */
+static int xlnx_mix_get_layer_scaling(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ int scale_factor = 0;
+ u32 reg;
+ struct xlnx_mix_layer_data *l_data = xlnx_mix_get_layer_data(mixer, id);
+
+ if (id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg = XVMIX_LOGOSCALEFACTOR_DATA +
+ XVMIX_LOGO_OFFSET;
+ else
+ reg = XVMIX_LOGOSCALEFACTOR_DATA;
+ scale_factor = reg_readl(mixer->base, reg);
+ l_data->layer_regs.scale_fact = scale_factor;
+ }
+ } else {
+		/* Layer0-Layer15 */
+ if (id < mixer->logo_layer_id && l_data->hw_config.can_scale) {
+ reg = XVMIX_LAYERSCALE_0_DATA + (id * XVMIX_REG_OFFSET);
+ scale_factor = reg_readl(mixer->base, reg);
+ l_data->layer_regs.scale_fact = scale_factor;
+ }
+ }
+ return scale_factor;
+}
+
+/**
+ * xlnx_mix_set_layer_window - Sets the position of an overlay layer
+ * @mixer: Specific mixer object instance controlling the video
+ * @id: Logical layer id (1-15) to be positioned
+ * @x_pos: New column at which to start display of overlay layer
+ * @y_pos: New row at which to start display of overlay layer
+ * @width: Number of active columns to display for overlay layer
+ * @height: Number of active rows to display for overlay layer
+ * @stride: Width in bytes of overlay memory buffer (memory layer only)
+ *
+ * Sets the position of an overlay layer over the background layer (layer 0)
+ * Applicable only for layers 1-15 or the logo layer
+ *
+ * Return:
+ * Zero on success, -EINVAL if the position is invalid or if the layer
+ * data cannot be found
+ */
+static int xlnx_mix_set_layer_window(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id, u32 x_pos,
+ u32 y_pos, u32 width, u32 height,
+ u32 stride)
+{
+ struct xlnx_mix_layer_data *l_data;
+ u32 scale = 0;
+ int status = -EINVAL;
+ u32 x_reg, y_reg, w_reg, h_reg, s_reg;
+ u32 off;
+
+ l_data = xlnx_mix_get_layer_data(mixer, id);
+ if (!l_data)
+ return status;
+
+ scale = xlnx_mix_get_layer_scaling(mixer, id);
+ if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+ return status;
+
+ if (id == mixer->logo_layer_id) {
+ if (!(mixer->logo_layer_en &&
+ width <= l_data->hw_config.max_width &&
+ height <= l_data->hw_config.max_height &&
+ height >= l_data->hw_config.min_height &&
+ width >= l_data->hw_config.min_width))
+ return status;
+
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS) {
+ x_reg = XVMIX_LOGOSTARTX_DATA + XVMIX_LOGO_OFFSET;
+ y_reg = XVMIX_LOGOSTARTY_DATA + XVMIX_LOGO_OFFSET;
+ w_reg = XVMIX_LOGOWIDTH_DATA + XVMIX_LOGO_OFFSET;
+ h_reg = XVMIX_LOGOHEIGHT_DATA + XVMIX_LOGO_OFFSET;
+ } else {
+ x_reg = XVMIX_LOGOSTARTX_DATA;
+ y_reg = XVMIX_LOGOSTARTY_DATA;
+ w_reg = XVMIX_LOGOWIDTH_DATA;
+ h_reg = XVMIX_LOGOHEIGHT_DATA;
+ }
+ reg_writel(mixer->base, x_reg, x_pos);
+ reg_writel(mixer->base, y_reg, y_pos);
+ reg_writel(mixer->base, w_reg, width);
+ reg_writel(mixer->base, h_reg, height);
+ l_data->layer_regs.x_pos = x_pos;
+ l_data->layer_regs.y_pos = y_pos;
+ l_data->layer_regs.width = width;
+ l_data->layer_regs.height = height;
+ status = 0;
+ } else {
+		/* Layer1-Layer15 */
+
+ if (!(id < mixer->layer_cnt &&
+ width <= l_data->hw_config.max_width &&
+ width >= l_data->hw_config.min_width))
+ return status;
+ x_reg = XVMIX_LAYERSTARTX_0_DATA;
+ y_reg = XVMIX_LAYERSTARTY_0_DATA;
+ w_reg = XVMIX_LAYERWIDTH_0_DATA;
+ h_reg = XVMIX_LAYERHEIGHT_0_DATA;
+ s_reg = XVMIX_LAYERSTRIDE_0_DATA;
+
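+		/*
+		 * Overlay register banks are spaced XVMIX_REG_OFFSET bytes
+		 * apart, so the layer id maps directly to a register offset.
+		 */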
+ off = id * XVMIX_REG_OFFSET;
+ reg_writel(mixer->base, (x_reg + off), x_pos);
+ reg_writel(mixer->base, (y_reg + off), y_pos);
+ reg_writel(mixer->base, (w_reg + off), width);
+ reg_writel(mixer->base, (h_reg + off), height);
+ l_data->layer_regs.x_pos = x_pos;
+ l_data->layer_regs.y_pos = y_pos;
+ l_data->layer_regs.width = width;
+ l_data->layer_regs.height = height;
+
+ if (!l_data->hw_config.is_streaming)
+ reg_writel(mixer->base, (s_reg + off), stride);
+ status = 0;
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_set_layer_dimensions - Set layer dimensions
+ * @plane: Drm plane object describing video layer to reposition
+ * @crtc_x: New horizontal anchor position from which to begin rendering
+ * @crtc_y: New vertical anchor position from which to begin rendering
+ * @width: Width, in pixels, to render from stream or memory buffer
+ * @height: Height, in pixels, to render from stream or memory buffer
+ * @stride: Width, in bytes, of a memory buffer. Used only for
+ * memory layers. Use 0 for streaming layers.
+ *
+ * Establishes new coordinates and dimensions for a video plane layer
+ * New size and coordinates of window must fit within the currently active
+ * area of the crtc (e.g. the background resolution)
+ *
+ * Return: 0 if successful; either -EINVAL if coordinate data is invalid
+ * or -ENODEV if layer data is not present
+ */
+static int xlnx_mix_set_layer_dimensions(struct xlnx_mix_plane *plane,
+ u32 crtc_x, u32 crtc_y,
+ u32 width, u32 height, u32 stride)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer_data;
+ enum xlnx_mix_layer_id layer_id;
+ int ret = 0;
+
+ layer_data = plane->mixer_layer;
+ layer_id = layer_data->id;
+ if (layer_data->layer_regs.height != height ||
+ layer_data->layer_regs.width != width) {
+ if (mixer->drm_primary_layer == plane)
+ xlnx_mix_layer_disable(mixer_hw, XVMIX_LAYER_MASTER);
+
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+ }
+ if (mixer->drm_primary_layer == plane) {
+ crtc_x = 0;
+ crtc_y = 0;
+ ret = xlnx_mix_set_active_area(mixer_hw, width, height);
+ if (ret)
+ return ret;
+ xlnx_mix_layer_enable(mixer_hw, XVMIX_LAYER_MASTER);
+ }
+ if (layer_id != XVMIX_LAYER_MASTER && layer_id < mixer_hw->max_layers) {
+ ret = xlnx_mix_set_layer_window(mixer_hw, layer_id, crtc_x,
+ crtc_y, width, height, stride);
+ if (ret)
+ return ret;
+ xlnx_mix_disp_layer_enable(plane);
+ }
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_scaling - Sets scaling factor
+ * @mixer: Instance of mixer to be subject of scaling request
+ * @id: Logical id of video layer subject to new scale setting
+ * @scale: scale Factor (1x, 2x or 4x) for horiz. and vert. dimensions
+ *
+ * Sets the scaling factor for the specified video layer
+ * Not applicable to background stream layer (layer 0)
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure to set scale for layer (likely
+ * returned if resulting size of layer exceeds dimensions of active
+ * display area)
+ */
+static int xlnx_mix_set_layer_scaling(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id, u32 scale)
+{
+ void __iomem *reg = mixer->base;
+ struct xlnx_mix_layer_data *l_data;
+ int status = 0;
+ u32 x_pos, y_pos, width, height, offset;
+
+ l_data = xlnx_mix_get_layer_data(mixer, id);
+ x_pos = l_data->layer_regs.x_pos;
+ y_pos = l_data->layer_regs.y_pos;
+ width = l_data->layer_regs.width;
+ height = l_data->layer_regs.height;
+
+ if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+ return -EINVAL;
+
+ if (id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA +
+ XVMIX_LOGO_OFFSET, scale);
+ else
+ reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA,
+ scale);
+ l_data->layer_regs.scale_fact = scale;
+ status = 0;
+ }
+ } else {
+ /* Layer0-Layer15 */
+ if (id < mixer->layer_cnt && l_data->hw_config.can_scale) {
+ offset = id * XVMIX_REG_OFFSET;
+
+ reg_writel(reg, (XVMIX_LAYERSCALE_0_DATA + offset),
+ scale);
+ l_data->layer_regs.scale_fact = scale;
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_set_layer_scale - Change video scale factor for video plane
+ * @plane: Drm plane object describing layer to be modified
+ * @val: Index of scale factor to use:
+ * 0 = 1x
+ * 1 = 2x
+ * 2 = 4x
+ *
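+ * The index itself is what is written to the hardware scale register;
+ * e.g. a 2x request writes the index 1, not the multiplier 2.
+ *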
+ * Return:
+ * Zero on success, either -EINVAL if scale value is illegal or
+ * -ENODEV if layer does not exist (null)
+ */
+static int xlnx_mix_set_layer_scale(struct xlnx_mix_plane *plane,
+ uint64_t val)
+{
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+ int ret;
+
+ if (!layer || !layer->hw_config.can_scale)
+ return -ENODEV;
+ if (val > XVMIX_SCALE_FACTOR_4X || val < XVMIX_SCALE_FACTOR_1X) {
+ DRM_ERROR("Mixer layer scale value illegal.\n");
+ return -EINVAL;
+ }
+ xlnx_mix_disp_layer_disable(plane);
+ msleep(50);
+ ret = xlnx_mix_set_layer_scaling(mixer_hw, layer->id, val);
+ xlnx_mix_disp_layer_enable(plane);
+
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_alpha - Set the alpha value
+ * @mixer: Instance of mixer controlling layer to modify
+ * @layer_id: Logical id of video overlay to adjust alpha setting
+ * @alpha: Desired alpha setting (0-255) for layer specified
+ * 255 = completely opaque
+ * 0 = fully transparent
+ *
+ * Set the layer global transparency for a video overlay
+ * Not applicable to background streaming layer
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_alpha(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id layer_id, u32 alpha)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 reg;
+ int status = -EINVAL;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, layer_id);
+
+ if (layer_id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg = XVMIX_LOGOALPHA_DATA + XVMIX_LOGO_OFFSET;
+ else
+ reg = XVMIX_LOGOALPHA_DATA;
+ reg_writel(mixer->base, reg, alpha);
+ layer_data->layer_regs.alpha = alpha;
+ status = 0;
+ }
+ } else {
+		/* Layer1-Layer15 */
+ if (layer_id < mixer->layer_cnt &&
+ layer_data->hw_config.can_alpha) {
+ u32 offset = layer_id * XVMIX_REG_OFFSET;
+
+ reg = XVMIX_LAYERALPHA_0_DATA;
+ reg_writel(mixer->base, (reg + offset), alpha);
+ layer_data->layer_regs.alpha = alpha;
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_disp_set_layer_alpha - Change the transparency of an entire plane
+ * @plane: Video layer affected by new alpha setting
+ * @val: Value of transparency setting (0-255) with 255 being opaque
+ * 0 being fully transparent
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_disp_set_layer_alpha(struct xlnx_mix_plane *plane,
+ uint64_t val)
+{
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+
+ if (!layer || !layer->hw_config.can_alpha)
+ return -ENODEV;
+ if (val > XVMIX_ALPHA_MAX || val < XVMIX_ALPHA_MIN) {
+ DRM_ERROR("Mixer layer alpha dts value illegal.\n");
+ return -EINVAL;
+ }
+ return xlnx_mix_set_layer_alpha(mixer_hw, layer->id, val);
+}
+
+/**
+ * xlnx_mix_set_layer_buff_addr - Set buff addr for layer
+ * @mixer: Instance of mixer controlling layer to modify
+ * @id: Logical id of the video overlay whose buffer addresses are set
+ * @luma_addr: Start address of plane 1 of the layer's frame buffer
+ * @chroma_addr: Start address of plane 2 of the layer's frame buffer
+ *
+ * Sets the buffer address of the specified layer
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_buff_addr(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id,
+ dma_addr_t luma_addr,
+ dma_addr_t chroma_addr)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 align, offset;
+ u32 reg1, reg2;
+
+ if (id >= mixer->layer_cnt)
+ return -EINVAL;
+
+ /* Check if addr is aligned to aximm width (PPC * 64-bits) */
+ align = mixer->ppc * 8;
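+	/* e.g. ppc = 2 requires 16-byte (128-bit) aligned buffer addresses */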
+ if ((luma_addr % align) != 0 || (chroma_addr % align) != 0)
+ return -EINVAL;
+
+ offset = (id - 1) * XVMIX_REG_OFFSET;
+ reg1 = XVMIX_LAYER1_BUF1_V_DATA + offset;
+ reg2 = XVMIX_LAYER1_BUF2_V_DATA + offset;
+ layer_data = &mixer->layer_data[id];
+ if (mixer->dma_addr_size == 64 && sizeof(dma_addr_t) == 8) {
+ reg_writeq(mixer->base, reg1, luma_addr);
+ reg_writeq(mixer->base, reg2, chroma_addr);
+ } else {
+ reg_writel(mixer->base, reg1, (u32)luma_addr);
+ reg_writel(mixer->base, reg2, (u32)chroma_addr);
+ }
+ layer_data->layer_regs.buff_addr1 = luma_addr;
+ layer_data->layer_regs.buff_addr2 = chroma_addr;
+
+ return 0;
+}
+
+/**
+ * xlnx_mix_hw_plane_dpms - Implementation of display power management
+ * system call (dpms).
+ * @plane: Plane/mixer layer to enable/disable (based on dpms value)
+ * @dpms: Display power management state to act upon
+ *
+ * Designed to disable and turn off a plane and restore all attached drm
+ * properties to their initial values. Alternatively, if dpms is "on", will
+ * enable a layer.
+ */
+static void
+xlnx_mix_hw_plane_dpms(struct xlnx_mix_plane *plane, int dpms)
+{
+ struct xlnx_mix *mixer;
+
+ if (!plane->mixer)
+ return;
+ mixer = plane->mixer;
+ plane->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ xlnx_mix_disp_layer_enable(plane);
+ break;
+ default:
+ xlnx_mix_mark_layer_inactive(plane);
+ xlnx_mix_disp_layer_disable(plane);
+ /* restore to default property values */
+ if (mixer->alpha_prop)
+ xlnx_mix_disp_set_layer_alpha(plane, XVMIX_ALPHA_MAX);
+ if (mixer->scale_prop)
+ xlnx_mix_set_layer_scale(plane, XVMIX_SCALE_FACTOR_1X);
+ }
+}
+
+static void xlnx_mix_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ unsigned int i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+ if (plane->dpms == dpms)
+ return;
+ plane->dpms = dpms;
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ /* start dma engine */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan && plane->dma[i].is_active)
+ dma_async_issue_pending(plane->dma[i].chan);
+ xlnx_mix_hw_plane_dpms(plane, dpms);
+ break;
+ default:
+ xlnx_mix_hw_plane_dpms(plane, dpms);
+ /* stop dma engine and release descriptors */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ if (plane->dma[i].chan && plane->dma[i].is_active) {
+ dmaengine_terminate_sync(plane->dma[i].chan);
+ plane->dma[i].is_active = false;
+ }
+ }
+ break;
+ }
+}
+
+static int
+xlnx_mix_disp_plane_atomic_set_property(struct drm_plane *base_plane,
+ struct drm_plane_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct xlnx_mix *mixer = plane->mixer;
+
+	if (property == mixer->alpha_prop)
+		return xlnx_mix_disp_set_layer_alpha(plane, val);
+	if (property == mixer->scale_prop)
+		return xlnx_mix_set_layer_scale(plane, val);
+
+	return -EINVAL;
+}
+
+static int
+xlnx_mix_disp_plane_atomic_get_property(struct drm_plane *base_plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct xlnx_mix *mixer = plane->mixer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ u32 layer_id = plane->mixer_layer->id;
+
+ if (property == mixer->alpha_prop)
+ *val = mixer_hw->layer_data[layer_id].layer_regs.alpha;
+ else if (property == mixer->scale_prop)
+ *val = mixer_hw->layer_data[layer_id].layer_regs.scale_fact;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_mix_disp_plane_atomic_update_plane - plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context
+ *
+ * Provides a default plane update handler using the atomic driver interface.
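+ * In addition, the commit is flagged for an async update whenever
+ * drm_atomic_helper_async_check() reports that one is possible.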
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int
+xlnx_mix_disp_plane_atomic_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w,
+ unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
+ /* Do async-update if possible */
+ state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static struct drm_plane_funcs xlnx_mix_plane_funcs = {
+ .update_plane = xlnx_mix_disp_plane_atomic_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .atomic_set_property = xlnx_mix_disp_plane_atomic_set_property,
+ .atomic_get_property = xlnx_mix_disp_plane_atomic_get_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * xlnx_mix_logo_load - Loads mixer's internal bram
+ * @mixer: Mixer instance to act upon
+ * @logo_w: Width of logo in pixels
+ * @logo_h: Height of logo in pixels
+ * @r_buf: Pointer to byte buffer array of R data values
+ * @g_buf: Pointer to byte buffer array of G data values
+ * @b_buf: Pointer to byte buffer array of B data values
+ * @a_buf: Pointer to byte buffer array of A data values
+ *
+ * Loads mixer's internal bram with planar R, G, B and A data
+ *
+ * Return:
+ * Zero on success, -ENODEV if logo layer not enabled; -EINVAL otherwise
+ */
+static int xlnx_mix_logo_load(struct xlnx_mix_hw *mixer, u32 logo_w, u32 logo_h,
+ u8 *r_buf, u8 *g_buf, u8 *b_buf, u8 *a_buf)
+{
+ void __iomem *reg = mixer->base;
+ struct xlnx_mix_layer_data *layer_data;
+
+ int x;
+ u32 shift;
+ u32 rword, gword, bword, aword;
+ u32 pixel_cnt = logo_w * logo_h;
+ u32 unaligned_pix_cnt = pixel_cnt % 4;
+ u32 width, height, curr_x_pos, curr_y_pos;
+ u32 rbase_addr, gbase_addr, bbase_addr, abase_addr;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, mixer->logo_layer_id);
+ rword = 0;
+ gword = 0;
+ bword = 0;
+ aword = 0;
+
+ if (!layer_data)
+ return -ENODEV;
+
+ /* RGBA data should be 32-bit word aligned */
+ if (unaligned_pix_cnt && mixer->logo_pixel_alpha_enabled)
+ return -EINVAL;
+
+ if (!(mixer->logo_layer_en &&
+ logo_w <= layer_data->hw_config.max_width &&
+ logo_h <= layer_data->hw_config.max_height))
+ return -EINVAL;
+
+ width = logo_w;
+ height = logo_h;
+ rbase_addr = XVMIX_LOGOR_V_BASE;
+ gbase_addr = XVMIX_LOGOG_V_BASE;
+ bbase_addr = XVMIX_LOGOB_V_BASE;
+ abase_addr = XVMIX_LOGOA_V_BASE;
+
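+	/*
+	 * Pack four 8-bit samples per 32-bit BRAM word, little-endian:
+	 * pixel 0 of each group lands in bits [7:0], pixel 3 in [31:24].
+	 */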
+ for (x = 0; x < pixel_cnt; x++) {
+ shift = (x % 4) * 8;
+ rword |= r_buf[x] << shift;
+ gword |= g_buf[x] << shift;
+ bword |= b_buf[x] << shift;
+ if (mixer->logo_pixel_alpha_enabled)
+ aword |= a_buf[x] << shift;
+
+		if (x % 4 == 3) {
+			reg_writel(reg, (rbase_addr + (x - 3)), rword);
+			reg_writel(reg, (gbase_addr + (x - 3)), gword);
+			reg_writel(reg, (bbase_addr + (x - 3)), bword);
+			if (mixer->logo_pixel_alpha_enabled)
+				reg_writel(reg, (abase_addr + (x - 3)), aword);
+			/* reset the accumulators for the next 32-bit word */
+			rword = 0;
+			gword = 0;
+			bword = 0;
+			aword = 0;
+		}
+ }
+
+ curr_x_pos = layer_data->layer_regs.x_pos;
+ curr_y_pos = layer_data->layer_regs.y_pos;
+ return xlnx_mix_set_layer_window(mixer, mixer->logo_layer_id,
+ curr_x_pos, curr_y_pos,
+ logo_w, logo_h, 0);
+}
+
+static int xlnx_mix_update_logo_img(struct xlnx_mix_plane *plane,
+ struct drm_gem_cma_object *buffer,
+ u32 src_w, u32 src_h)
+{
+ struct xlnx_mix_layer_data *logo_layer = plane->mixer_layer;
+ struct xlnx_mix_hw *mixer = to_mixer_hw(plane);
+ size_t pixel_cnt = src_h * src_w;
+ /* color comp defaults to offset in RG24 buffer */
+ u32 pix_cmp_cnt;
+ u32 logo_cmp_cnt;
+ bool per_pixel_alpha = false;
+ u32 max_width = logo_layer->hw_config.max_width;
+ u32 max_height = logo_layer->hw_config.max_height;
+ u32 min_width = logo_layer->hw_config.min_width;
+ u32 min_height = logo_layer->hw_config.min_height;
+ u8 *r_data = NULL;
+ u8 *g_data = NULL;
+ u8 *b_data = NULL;
+ u8 *a_data = NULL;
+ size_t el_size = sizeof(u8);
+ u8 *pixel_mem_data;
+ int ret, i, j;
+
+ /* ensure valid conditions for update */
+ if (logo_layer->id != mixer->logo_layer_id)
+ return 0;
+
+ if (src_h > max_height || src_w > max_width ||
+ src_h < min_height || src_w < min_width) {
+ DRM_ERROR("Mixer logo/cursor layer dimensions illegal.\n");
+ return -EINVAL;
+ }
+
+ if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+ DRM_ERROR("DRM color format not supported for logo layer\n");
+ return -EINVAL;
+ }
+	per_pixel_alpha = logo_layer->hw_config.vid_fmt == DRM_FORMAT_RGBA8888;
+ r_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ g_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ b_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ if (per_pixel_alpha)
+ a_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+
+ if (!r_data || !g_data || !b_data || (per_pixel_alpha && !a_data)) {
+ DRM_ERROR("Unable to allocate memory for logo layer data\n");
+ ret = -ENOMEM;
+ goto free;
+ }
+ pix_cmp_cnt = per_pixel_alpha ? 4 : 3;
+ logo_cmp_cnt = pixel_cnt * pix_cmp_cnt;
+	/* ensure buffer attributes have changed to indicate new logo
+	 * has been created
+	 */
+	if ((phys_addr_t)buffer->vaddr == logo_layer->layer_regs.buff_addr1 &&
+	    src_w == logo_layer->layer_regs.width &&
+	    src_h == logo_layer->layer_regs.height) {
+		/* unchanged logo: release staging arrays and report success */
+		ret = 0;
+		goto free;
+	}
+
+ /* cache buffer address for future comparison */
+ logo_layer->layer_regs.buff_addr1 = (phys_addr_t)buffer->vaddr;
+ pixel_mem_data = (u8 *)(buffer->vaddr);
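+	/*
+	 * De-interleave the packed pixel bytes ([A,]B,G,R per pixel in
+	 * memory) into the planar staging arrays that xlnx_mix_logo_load()
+	 * consumes.
+	 */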
+ for (i = 0, j = 0; j < pixel_cnt; j++) {
+ if (per_pixel_alpha && a_data)
+ a_data[j] = pixel_mem_data[i++];
+
+ b_data[j] = pixel_mem_data[i++];
+ g_data[j] = pixel_mem_data[i++];
+ r_data[j] = pixel_mem_data[i++];
+ }
+ ret = xlnx_mix_logo_load(to_mixer_hw(plane), src_w, src_h, r_data,
+ g_data, b_data,
+ per_pixel_alpha ? a_data : NULL);
+free:
+ kfree(r_data);
+ kfree(g_data);
+ kfree(b_data);
+ kfree(a_data);
+
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_plane - Implementation of DRM plane_update callback
+ * @plane: xlnx_mix_plane object containing references to
+ * the base plane and mixer
+ * @fb: Framebuffer descriptor
+ * @crtc_x: X position of layer on crtc. Note, if the plane represents either
+ * the master hardware layer (video0) or the layer representing the DRM primary
+ * layer, the crtc x/y coordinates are either ignored and/or set to 0/0
+ * respectively.
+ * @crtc_y: Y position of layer. See description of crtc_x handling
+ * for more information.
+ * @src_x: x-offset in memory buffer from which to start reading
+ * @src_y: y-offset in memory buffer from which to start reading
+ * @src_w: Number of horizontal pixels to read from memory per row
+ * @src_h: Number of rows of video data to read from memory
+ *
+ * Configures a mixer layer to comply with a userspace SET_PLANE ioctl
+ * call.
+ *
+ * Return:
+ * Zero on success, non-zero linux error code otherwise.
+ */
+static int xlnx_mix_set_plane(struct xlnx_mix_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct xlnx_mix *mixer;
+ struct drm_gem_cma_object *luma_buffer;
+ u32 luma_stride = fb->pitches[0];
+ dma_addr_t luma_addr, chroma_addr = 0;
+ u32 active_area_width;
+ u32 active_area_height;
+ enum xlnx_mix_layer_id layer_id;
+ int ret;
+ const struct drm_format_info *info = fb->format;
+
+ mixer = plane->mixer;
+ mixer_hw = &mixer->mixer_hw;
+ layer_id = plane->mixer_layer->id;
+ active_area_width =
+ mixer->drm_primary_layer->mixer_layer->layer_regs.width;
+ active_area_height =
+ mixer->drm_primary_layer->mixer_layer->layer_regs.height;
+ /* compute memory data */
+ luma_buffer = drm_fb_cma_get_gem_obj(fb, 0);
+ luma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 0);
+ if (!luma_addr) {
+ DRM_ERROR("%s failed to get luma paddr\n", __func__);
+ return -EINVAL;
+ }
+
+ if (info->num_planes > 1) {
+ chroma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 1);
+ if (!chroma_addr) {
+ DRM_ERROR("failed to get chroma paddr\n");
+ return -EINVAL;
+ }
+ }
+ ret = xlnx_mix_mark_layer_active(plane);
+ if (ret)
+ return ret;
+
+ switch (layer_id) {
+ case XVMIX_LAYER_MASTER:
+ if (!plane->mixer_layer->hw_config.is_streaming)
+ xlnx_mix_mark_layer_inactive(plane);
+ if (mixer->drm_primary_layer == mixer->hw_master_layer) {
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+ ret = xlnx_mix_set_active_area(mixer_hw, src_w, src_h);
+ if (ret)
+ return ret;
+ xlnx_mix_layer_enable(mixer_hw, layer_id);
+
+ } else if (src_w != active_area_width ||
+ src_h != active_area_height) {
+ DRM_ERROR("Invalid dimensions for mixer layer 0.\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ ret = xlnx_mix_set_layer_dimensions(plane, crtc_x, crtc_y,
+ src_w, src_h, luma_stride);
+ if (ret)
+ break;
+ if (layer_id == mixer_hw->logo_layer_id) {
+ ret = xlnx_mix_update_logo_img(plane, luma_buffer,
+ src_w, src_h);
+ } else {
+ if (!plane->mixer_layer->hw_config.is_streaming)
+ ret = xlnx_mix_set_layer_buff_addr
+ (mixer_hw, plane->mixer_layer->id,
+ luma_addr, chroma_addr);
+ }
+ }
+ return ret;
+}
+
+/* mode set a plane */
+static int xlnx_mix_plane_mode_set(struct drm_plane *base_plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, uint32_t src_y,
+ u32 src_w, uint32_t src_h)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ const struct drm_format_info *info = fb->format;
+ size_t i = 0;
+ dma_addr_t luma_paddr;
+ int ret;
+ u32 stride;
+
+ /* JPM TODO begin start of code to extract into prep-interleaved*/
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n", src_w, crtc_x, src_h, crtc_y);
+
+ /* We have multiple dma channels. Set each per video plane */
+ for (; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? info->hsub : 1);
+ unsigned int height = src_h / (i ? info->vsub : 1);
+
+ luma_paddr = drm_fb_cma_get_gem_addr(fb, base_plane->state, i);
+ if (!luma_paddr) {
+ DRM_ERROR("%s failed to get luma paddr\n", __func__);
+ return -EINVAL;
+ }
+
+ plane->dma[i].xt.numf = height;
+ plane->dma[i].sgl[0].size =
+ drm_format_plane_width_bytes(info, 0, width);
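+		/*
+		 * icg is the inter-chunk gap: padding bytes between the end
+		 * of one line and the start of the next, e.g. (illustrative)
+		 * a 5760-byte line within a 6144-byte pitch gives icg = 384.
+		 */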
+ plane->dma[i].sgl[0].icg = fb->pitches[0] -
+ plane->dma[i].sgl[0].size;
+ plane->dma[i].xt.src_start = luma_paddr;
+ plane->dma[i].xt.frame_size = info->num_planes;
+ plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ plane->dma[i].xt.src_sgl = true;
+ plane->dma[i].xt.dst_sgl = false;
+ plane->dma[i].is_active = true;
+ }
+
+ for (; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ plane->dma[i].is_active = false;
+ /* Do we have a video format aware dma channel?
+ * If so, modify descriptor accordingly
+ */
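+	/*
+	 * Example (illustrative, NV12 in one contiguous buffer): the chroma
+	 * plane starts numf * stride bytes past the luma base, so src_icg
+	 * is whatever extra gap separates the two planes.
+	 */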
+ if (plane->dma[0].chan && !plane->dma[1].chan && info->num_planes > 1) {
+ stride = plane->dma[0].sgl[0].size + plane->dma[0].sgl[0].icg;
+ plane->dma[0].sgl[0].src_icg = plane->dma[1].xt.src_start -
+ plane->dma[0].xt.src_start -
+ (plane->dma[0].xt.numf * stride);
+ }
+
+ ret = xlnx_mix_set_plane(plane, fb, crtc_x, crtc_y, src_x, src_y,
+ src_w, src_h);
+ return ret;
+}
+
+static int xlnx_mix_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ return 0;
+}
+
+static void xlnx_mix_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
+static int xlnx_mix_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int scale;
+ struct xlnx_mix_plane *mix_plane = to_xlnx_plane(plane);
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(mix_plane);
+ struct xlnx_mix *mix;
+
+ /* No check required for the drm_primary_plane */
+ mix = container_of(mixer_hw, struct xlnx_mix, mixer_hw);
+ if (mix->drm_primary_layer == mix_plane)
+ return 0;
+
+ scale = xlnx_mix_get_layer_scaling(mixer_hw,
+ mix_plane->mixer_layer->id);
+ if (is_window_valid(mixer_hw, state->crtc_x, state->crtc_y,
+ state->src_w >> 16, state->src_h >> 16, scale))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void xlnx_mix_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ if (old_state->fb &&
+ old_state->fb->format->format != plane->state->fb->format->format)
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+
+ ret = xlnx_mix_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret) {
+ DRM_ERROR("failed to mode-set a plane\n");
+ return;
+ }
+ /* apply the new fb addr */
+ xlnx_mix_plane_commit(plane);
+ /* make sure a plane is on */
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_ON);
+}
+
+static void xlnx_mix_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
+static int xlnx_mix_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void
+xlnx_mix_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ /* Update the current state with new configurations */
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ plane->state->crtc = new_state->crtc;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->state = new_state->state;
+
+ xlnx_mix_plane_atomic_update(plane, old_state);
+}
+
+static const struct drm_plane_helper_funcs xlnx_mix_plane_helper_funcs = {
+ .prepare_fb = xlnx_mix_plane_prepare_fb,
+ .cleanup_fb = xlnx_mix_plane_cleanup_fb,
+ .atomic_check = xlnx_mix_plane_atomic_check,
+ .atomic_update = xlnx_mix_plane_atomic_update,
+ .atomic_disable = xlnx_mix_plane_atomic_disable,
+ .atomic_async_check = xlnx_mix_plane_atomic_async_check,
+ .atomic_async_update = xlnx_mix_plane_atomic_async_update,
+};
+
+static int xlnx_mix_init_plane(struct xlnx_mix_plane *plane,
+ unsigned int poss_crtcs,
+ struct device_node *layer_node)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ char name[16];
+ enum drm_plane_type type;
+ int ret, i;
+
+ plane->dpms = DRM_MODE_DPMS_OFF;
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ snprintf(name, sizeof(name), "dma%d", i);
+ plane->dma[i].chan = of_dma_request_slave_channel(layer_node,
+ name);
+ if (PTR_ERR(plane->dma[i].chan) == -ENODEV) {
+ plane->dma[i].chan = NULL;
+ continue;
+ }
+ if (IS_ERR(plane->dma[i].chan)) {
+ DRM_ERROR("failed to request dma channel\n");
+ ret = PTR_ERR(plane->dma[i].chan);
+ plane->dma[i].chan = NULL;
+ goto err_dma;
+ }
+ }
+ if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+ DRM_ERROR("DRM color format not supported by mixer\n");
+ ret = -ENODEV;
+ goto err_init;
+ }
+ plane->format = plane->mixer_layer->hw_config.vid_fmt;
+ if (plane == mixer->hw_logo_layer)
+ type = DRM_PLANE_TYPE_CURSOR;
+ if (plane == mixer->drm_primary_layer)
+ type = DRM_PLANE_TYPE_PRIMARY;
+
+ /* initialize drm plane */
+ ret = drm_universal_plane_init(mixer->drm, &plane->base,
+ poss_crtcs, &xlnx_mix_plane_funcs,
+ &plane->format,
+ 1, NULL, type, NULL);
+
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_init;
+ }
+ drm_plane_helper_add(&plane->base, &xlnx_mix_plane_helper_funcs);
+ of_node_put(layer_node);
+
+ return 0;
+
+err_init:
+ xlnx_mix_disp_layer_disable(plane);
+err_dma:
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan)
+ dma_release_channel(plane->dma[i].chan);
+
+ of_node_put(layer_node);
+ return ret;
+}
+
+static int xlnx_mix_parse_dt_bg_video_fmt(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct device_node *layer_node;
+ struct xlnx_mix_layer_data *layer;
+ const char *vformat;
+
+ layer_node = of_get_child_by_name(node, "layer_0");
+ layer = &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+
+ if (of_property_read_string(layer_node, "xlnx,vformat",
+ &vformat)) {
+ DRM_ERROR("No xlnx,vformat value for layer 0 in dts\n");
+ return -EINVAL;
+ }
+ strcpy((char *)&layer->hw_config.vid_fmt, vformat);
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_u32(node, "xlnx,bpc", &mixer_hw->bg_layer_bpc)) {
+ DRM_ERROR("Failed to get bits per component (bpc) prop\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width)) {
+ DRM_ERROR("Failed to get screen width prop\n");
+ return -EINVAL;
+ }
+ mixer_hw->max_layer_width = layer->hw_config.max_width;
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-height",
+ &layer->hw_config.max_height)) {
+ DRM_ERROR("Failed to get screen height prop\n");
+ return -EINVAL;
+ }
+ mixer_hw->max_layer_height = layer->hw_config.max_height;
+ layer->id = XVMIX_LAYER_MASTER;
+
+ return 0;
+}
+
+static int xlnx_mix_parse_dt_logo_data(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ struct device_node *logo_node;
+ u32 max_width, max_height;
+
+ logo_node = of_get_child_by_name(node, "logo");
+ if (!logo_node) {
+ DRM_ERROR("No logo node specified in device tree.\n");
+ return -EINVAL;
+ }
+
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+
+ /* set defaults for logo layer */
+ layer_data->hw_config.min_height = XVMIX_LOGO_LAYER_HEIGHT_MIN;
+ layer_data->hw_config.min_width = XVMIX_LOGO_LAYER_WIDTH_MIN;
+ layer_data->hw_config.is_streaming = false;
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGB888;
+ layer_data->hw_config.can_alpha = true;
+ layer_data->hw_config.can_scale = true;
+ layer_data->layer_regs.buff_addr1 = 0;
+ layer_data->layer_regs.buff_addr2 = 0;
+ layer_data->id = mixer_hw->logo_layer_id;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-width", &max_width)) {
+ DRM_ERROR("Failed to get logo width prop\n");
+ return -EINVAL;
+ }
+ if (max_width > XVMIX_LOGO_LAYER_WIDTH_MAX ||
+ max_width < XVMIX_LOGO_LAYER_WIDTH_MIN) {
+ DRM_ERROR("Illegal mixer logo layer width.\n");
+ return -EINVAL;
+ }
+ layer_data->hw_config.max_width = max_width;
+ mixer_hw->max_logo_layer_width = layer_data->hw_config.max_width;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-height", &max_height)) {
+ DRM_ERROR("Failed to get logo height prop\n");
+ return -EINVAL;
+ }
+ if (max_height > XVMIX_LOGO_LAYER_HEIGHT_MAX ||
+ max_height < XVMIX_LOGO_LAYER_HEIGHT_MIN) {
+ DRM_ERROR("Illegal mixer logo layer height.\n");
+ return -EINVAL;
+ }
+ layer_data->hw_config.max_height = max_height;
+ mixer_hw->max_logo_layer_height = layer_data->hw_config.max_height;
+ mixer_hw->logo_pixel_alpha_enabled =
+ of_property_read_bool(logo_node, "xlnx,logo-pixel-alpha");
+ if (mixer_hw->logo_pixel_alpha_enabled)
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGBA8888;
+
+ return 0;
+}
+
+static int xlnx_mix_dt_parse(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *planes;
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *vtc_node;
+ struct xlnx_mix_layer_data *l_data;
+ struct resource res;
+ int ret, l_cnt, i;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ mixer->dpms = DRM_MODE_DPMS_OFF;
+
+ mixer_hw->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mixer_hw->reset_gpio)) {
+ ret = PTR_ERR(mixer_hw->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(dev, "No gpio probed for mixer. Deferring\n");
+ else
+ dev_err(dev, "No reset gpio info from dts for mixer\n");
+ return ret;
+ }
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Invalid memory address for mixer %d\n", ret);
+ return ret;
+ }
+ /* Read in mandatory global dts properties */
+ mixer_hw->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(mixer_hw->base)) {
+ dev_err(dev, "Failed to map io mem space for mixer\n");
+ return PTR_ERR(mixer_hw->base);
+ }
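+	/*
+	 * Mixer v4.0 exposes 16 overlay layers plus master and logo (18
+	 * total) with the logo enable at bit 23; earlier IP exposes 8
+	 * overlays (10 total) with the logo enable at bit 15 (see the
+	 * compatibility note at the bottom of this file).
+	 */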
+ if (of_device_is_compatible(dev->of_node, "xlnx,mixer-4.0")) {
+ mixer_hw->max_layers = 18;
+ mixer_hw->logo_en_mask = BIT(23);
+ mixer_hw->enable_all_mask = (GENMASK(16, 0) |
+ mixer_hw->logo_en_mask);
+ } else {
+ mixer_hw->max_layers = 10;
+ mixer_hw->logo_en_mask = BIT(15);
+ mixer_hw->enable_all_mask = (GENMASK(8, 0) |
+ mixer_hw->logo_en_mask);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-layers",
+ &mixer_hw->num_layers);
+ if (ret) {
+ dev_err(dev, "No xlnx,num-layers dts prop for mixer node\n");
+ return ret;
+ }
+ mixer_hw->logo_layer_id = mixer_hw->max_layers - 1;
+ if (mixer_hw->num_layers > mixer_hw->max_layers) {
+ dev_err(dev, "Num layer nodes in device tree > mixer max\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &mixer_hw->dma_addr_size);
+ if (ret) {
+ dev_err(dev, "missing addr-width dts prop\n");
+ return ret;
+ }
+ if (mixer_hw->dma_addr_size != 32 && mixer_hw->dma_addr_size != 64) {
+ dev_err(dev, "invalid addr-width dts prop\n");
+ return -EINVAL;
+ }
+
+ /* VTC Bridge support */
+ vtc_node = of_parse_phandle(node, "xlnx,bridge", 0);
+ if (vtc_node) {
+ mixer->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+ if (!mixer->vtc_bridge) {
+ dev_info(dev, "Didn't get vtc bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ } else {
+ dev_info(dev, "vtc bridge property not present\n");
+ }
+
+ mixer_hw->logo_layer_en = of_property_read_bool(node,
+ "xlnx,logo-layer");
+ l_cnt = mixer_hw->num_layers + (mixer_hw->logo_layer_en ? 1 : 0);
+ mixer_hw->layer_cnt = l_cnt;
+
+ l_data = devm_kzalloc(dev, sizeof(*l_data) * l_cnt, GFP_KERNEL);
+ if (!l_data)
+ return -ENOMEM;
+ mixer_hw->layer_data = l_data;
+ /* init DRM planes */
+ planes = devm_kzalloc(dev, sizeof(*planes) * l_cnt, GFP_KERNEL);
+ if (!planes)
+ return -ENOMEM;
+ mixer->planes = planes;
+ mixer->num_planes = l_cnt;
+ for (i = 0; i < mixer->num_planes; i++)
+ mixer->planes[i].mixer = mixer;
+
+ /* establish background layer video properties from dts */
+ ret = xlnx_mix_parse_dt_bg_video_fmt(node, mixer_hw);
+ if (ret)
+ return ret;
+ if (mixer_hw->logo_layer_en) {
+ /* read logo data from dts */
+ ret = xlnx_mix_parse_dt_logo_data(node, mixer_hw);
+ return ret;
+ }
+ return 0;
+}
+
+static int xlnx_mix_of_init_layer(struct device *dev, struct device_node *node,
+ char *name, struct xlnx_mix_layer_data *layer,
+ u32 max_width, struct xlnx_mix *mixer, int id)
+{
+ struct device_node *layer_node;
+ const char *vformat;
+ int ret;
+
+ layer_node = of_get_child_by_name(node, name);
+ if (!layer_node)
+ return -EINVAL;
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.is_streaming = false;
+ layer->hw_config.max_width = max_width;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+ layer->hw_config.vid_fmt = 0;
+ layer->id = 0;
+ mixer->planes[id].mixer_layer = layer;
+
+ ret = of_property_read_u32(layer_node, "xlnx,layer-id", &layer->id);
+ if (ret) {
+ dev_err(dev, "xlnx,layer-id property not found\n");
+ return ret;
+ }
+ if (layer->id < 1 || layer->id >= mixer->mixer_hw.max_layers) {
+ dev_err(dev, "Mixer layer id %u in dts is out of legal range\n",
+ layer->id);
+ return -EINVAL;
+ }
+ ret = of_property_read_string(layer_node, "xlnx,vformat", &vformat);
+ if (ret) {
+ dev_err(dev, "No mixer layer vformat in dts for layer id %d\n",
+ layer->id);
+ return ret;
+ }
+
+ strcpy((char *)&layer->hw_config.vid_fmt, vformat);
+ layer->hw_config.can_scale =
+ of_property_read_bool(layer_node, "xlnx,layer-scale");
+ if (layer->hw_config.can_scale) {
+ ret = of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width);
+ if (ret) {
+ dev_err(dev, "Mixer layer %d dts missing width prop.\n",
+ layer->id);
+ return ret;
+ }
+
+ if (layer->hw_config.max_width > max_width) {
+			dev_err(dev, "Illegal Mixer layer %d width %d\n",
+ layer->id, layer->hw_config.max_width);
+ return -EINVAL;
+ }
+ }
+ layer->hw_config.can_alpha =
+ of_property_read_bool(layer_node, "xlnx,layer-alpha");
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_bool(layer_node, "xlnx,layer-primary")) {
+ if (mixer->drm_primary_layer) {
+ dev_err(dev,
+ "More than one primary layer in mixer dts\n");
+ return -EINVAL;
+ }
+ mixer->drm_primary_layer = &mixer->planes[id];
+ }
+ ret = xlnx_mix_init_plane(&mixer->planes[id], 1, layer_node);
+ if (ret)
+		dev_err(dev, "Unable to init drm mixer plane id = %u\n", id);
+
+ return ret;
+}
+
+static irqreturn_t xlnx_mix_intr_handler(int irq, void *data)
+{
+ struct xlnx_mix_hw *mixer = data;
+ u32 intr = xlnx_mix_get_intr_status(mixer);
+
+ if (!intr)
+ return IRQ_NONE;
+ if (mixer->intrpt_handler_fn)
+ mixer->intrpt_handler_fn(mixer->intrpt_data);
+ xlnx_mix_clear_intr_status(mixer, intr);
+
+ return IRQ_HANDLED;
+}
+
+static void xlnx_mix_create_plane_properties(struct xlnx_mix *mixer)
+{
+ mixer->scale_prop = drm_property_create_range(mixer->drm, 0, "scale",
+ XVMIX_SCALE_FACTOR_1X,
+ XVMIX_SCALE_FACTOR_4X);
+ mixer->alpha_prop = drm_property_create_range(mixer->drm, 0, "alpha",
+ XVMIX_ALPHA_MIN,
+ XVMIX_ALPHA_MAX);
+}
+
+static int xlnx_mix_plane_create(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *layer_node;
+ char name[20];
+ struct xlnx_mix_layer_data *layer_data;
+ int ret, i;
+ int layer_idx;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ xlnx_mix_create_plane_properties(mixer);
+
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].id = XVMIX_MASTER_LAYER_IDX;
+ mixer->hw_master_layer = &mixer->planes[XVMIX_MASTER_LAYER_IDX];
+
+ if (mixer_hw->logo_layer_en) {
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].id = XVMIX_LOGO_LAYER_IDX;
+ mixer->hw_logo_layer = &mixer->planes[XVMIX_LOGO_LAYER_IDX];
+ layer_node = of_get_child_by_name(node, "logo");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_LOGO_LAYER_IDX],
+ 1, layer_node);
+ if (ret)
+ return ret;
+ }
+ layer_idx = mixer_hw->logo_layer_en ? 2 : 1;
+ for (i = 1; i < mixer_hw->num_layers; i++, layer_idx++) {
+ snprintf(name, sizeof(name), "layer_%d", i);
+ ret = xlnx_mix_of_init_layer(dev, node, name,
+ &mixer_hw->layer_data[layer_idx],
+ mixer_hw->max_layer_width,
+ mixer, layer_idx);
+ if (ret)
+ return ret;
+ }
+ /* If none of the overlay layers were designated as the drm
+ * primary layer, default to the mixer's video0 layer as drm primary
+ */
+ if (!mixer->drm_primary_layer)
+ mixer->drm_primary_layer = mixer->hw_master_layer;
+ layer_node = of_get_child_by_name(node, "layer_0");
+	ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_MASTER_LAYER_IDX], 1,
+				  layer_node);
+	if (ret)
+		return ret;
+ /* request irq and obtain pixels-per-clock (ppc) property */
+ mixer_hw->irq = irq_of_parse_and_map(node, 0);
+ if (mixer_hw->irq > 0) {
+ ret = devm_request_irq(dev, mixer_hw->irq,
+ xlnx_mix_intr_handler,
+ IRQF_SHARED, "xlnx-mixer", mixer_hw);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+ }
+ ret = of_property_read_u32(node, "xlnx,ppc", &mixer_hw->ppc);
+ if (ret) {
+ dev_err(dev, "No xlnx,ppc property for mixer dts\n");
+ return ret;
+ }
+
+ mixer->max_width = XVMIX_DISP_MAX_WIDTH;
+ mixer->max_height = XVMIX_DISP_MAX_HEIGHT;
+ if (mixer->hw_logo_layer) {
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->max_cursor_width = layer_data->hw_config.max_width;
+ mixer->max_cursor_height = layer_data->hw_config.max_height;
+ }
+ return 0;
+}
+
+/**
+ * xlnx_mix_plane_restore - Restore the plane states
+ * @mixer: mixer device core structure
+ *
+ * Restore the plane states to the default ones. Any state that needs to be
+ * restored should be here. This improves consistency as applications see
+ * the same default values, and it avoids a mismatch between software and
+ * hardware state: the software values are refreshed whenever the hardware
+ * values are reset.
+ */
+static void xlnx_mix_plane_restore(struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *plane;
+ unsigned int i;
+
+ if (!mixer)
+ return;
+ /*
+ * Reinitialize property default values as they get reset by DPMS OFF
+ * operation. User will read the correct default values later, and
+ * planes will be initialized with default values.
+ */
+ for (i = 0; i < mixer->num_planes; i++) {
+ plane = &mixer->planes[i];
+ if (!plane)
+ continue;
+ xlnx_mix_hw_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ }
+}
+
+/**
+ * xlnx_mix_set_bkg_col - Set background color
+ * @mixer: Mixer instance to program with new background color
+ * @rgb_value: RGB encoded as 32-bit integer in little-endian format
+ *
+ * Set the color to be output as the background color when the background
+ * stream layer is inactive
+ */
+static void xlnx_mix_set_bkg_col(struct xlnx_mix_hw *mixer, u64 rgb_value)
+{
+ u32 bg_bpc = mixer->bg_layer_bpc;
+ u32 bpc_mask_shift = XVMIX_MAX_BPC - bg_bpc;
+ u32 val_mask = (GENMASK(15, 0) >> bpc_mask_shift);
+ u16 b_val = (rgb_value >> (bg_bpc * 2)) & val_mask;
+ u16 g_val = (rgb_value >> bg_bpc) & val_mask;
+ u16 r_val = (rgb_value >> 0) & val_mask;
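+	/*
+	 * Field layout: B in bits [3*bpc-1:2*bpc], G in [2*bpc-1:bpc],
+	 * R in [bpc-1:0]; e.g. with bg_bpc = 8, solid blue is 0xFF0000.
+	 */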
+
+ /* Set Background Color */
+ reg_writel(mixer->base, XVMIX_BACKGROUND_Y_R_DATA, r_val);
+ reg_writel(mixer->base, XVMIX_BACKGROUND_U_G_DATA, g_val);
+ reg_writel(mixer->base, XVMIX_BACKGROUND_V_B_DATA, b_val);
+ mixer->bg_color = rgb_value;
+}
+
+/**
+ * xlnx_mix_reset - Reset the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Toggles the reset gpio and restores the bg color, plane and interrupt mask.
+ */
+static void xlnx_mix_reset(struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw = &mixer->mixer_hw;
+
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+ /* restore layer properties and bg color after reset */
+ xlnx_mix_set_bkg_col(mixer_hw, mixer_hw->bg_color);
+ xlnx_mix_plane_restore(mixer);
+ xlnx_mix_intrpt_enable_done(&mixer->mixer_hw);
+}
+
+static void xlnx_mix_dpms(struct xlnx_mix *mixer, int dpms)
+{
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ xlnx_mix_start(&mixer->mixer_hw);
+ break;
+ default:
+ xlnx_mix_stop(&mixer->mixer_hw);
+ mdelay(50); /* let IP shut down */
+ xlnx_mix_reset(mixer);
+ }
+}
+
+/* set crtc dpms */
+static void xlnx_mix_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ int ret;
+ struct videomode vm;
+ struct drm_display_mode *mode = &base_crtc->mode;
+
+ DRM_DEBUG_KMS("dpms: %d\n", dpms);
+ if (mixer->dpms == dpms)
+ return;
+ mixer->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+		if (!mixer->pixel_clock_enabled) {
+			ret = clk_prepare_enable(mixer->pixel_clock);
+			if (ret) {
+				DRM_ERROR("failed to enable a pixel clock\n");
+				mixer->pixel_clock_enabled = false;
+			} else {
+				mixer->pixel_clock_enabled = true;
+			}
+		}
+
+ if (mixer->vtc_bridge) {
+ drm_display_mode_to_videomode(mode, &vm);
+ xlnx_bridge_set_timing(mixer->vtc_bridge, &vm);
+ xlnx_bridge_enable(mixer->vtc_bridge);
+ }
+
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ break;
+ default:
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_bridge_disable(mixer->vtc_bridge);
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+static void xlnx_mix_set_intr_handler(struct xlnx_mix *mixer,
+ void (*intr_handler_fn)(void *),
+ void *data)
+{
+ mixer->mixer_hw.intrpt_handler_fn = intr_handler_fn;
+ mixer->mixer_hw.intrpt_data = data;
+}
+
+static void xlnx_mix_crtc_vblank_handler(void *data)
+{
+ struct drm_crtc *base_crtc = data;
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ struct drm_device *drm = base_crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(base_crtc);
+ /* Finish page flip */
+ spin_lock_irqsave(&drm->event_lock, flags);
+ event = mixer->event;
+ mixer->event = NULL;
+ if (event) {
+ drm_crtc_send_vblank_event(base_crtc, event);
+ drm_crtc_vblank_put(base_crtc);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static int xlnx_mix_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ xlnx_mix_set_intr_handler(mixer, xlnx_mix_crtc_vblank_handler,
+ base_crtc);
+ return 0;
+}
+
+static void xlnx_mix_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ mixer->mixer_hw.intrpt_handler_fn = NULL;
+ mixer->mixer_hw.intrpt_data = NULL;
+}
+
+static void xlnx_mix_crtc_destroy(struct drm_crtc *base_crtc)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+ /* make sure crtc is off */
+ mixer->alpha_prop = NULL;
+ mixer->scale_prop = NULL;
+ mixer->bg_color = NULL;
+ xlnx_mix_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ drm_crtc_cleanup(base_crtc);
+}
+
+static int
+xlnx_mix_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static int
+xlnx_mix_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ return 0;
+}
+
+static struct drm_crtc_funcs xlnx_mix_crtc_funcs = {
+ .destroy = xlnx_mix_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_set_property = xlnx_mix_disp_crtc_atomic_set_property,
+ .atomic_get_property = xlnx_mix_disp_crtc_atomic_get_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = xlnx_mix_crtc_enable_vblank,
+ .disable_vblank = xlnx_mix_crtc_disable_vblank,
+};
+
+static void
+xlnx_mix_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/**
+ * xlnx_mix_clear_event - Clear any event if pending
+ * @crtc: DRM crtc object
+ *
+ */
+static void xlnx_mix_clear_event(struct drm_crtc *crtc)
+{
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+}
+
+static void
+xlnx_mix_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ xlnx_mix_clear_event(crtc);
+}
+
+static void xlnx_mix_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+}
+
+static int xlnx_mix_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static void
+xlnx_mix_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ /* Don't rely on vblank when disabling crtc */
+ if (crtc->state->event) {
+ struct xlnx_crtc *xcrtc = to_xlnx_crtc(crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(xcrtc);
+
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ mixer->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
+}
+
+static struct drm_crtc_helper_funcs xlnx_mix_crtc_helper_funcs = {
+ .atomic_enable = xlnx_mix_crtc_atomic_enable,
+ .atomic_disable = xlnx_mix_crtc_atomic_disable,
+ .mode_set_nofb = xlnx_mix_crtc_mode_set_nofb,
+ .atomic_check = xlnx_mix_crtc_atomic_check,
+ .atomic_begin = xlnx_mix_crtc_atomic_begin,
+};
+
+/**
+ * xlnx_mix_crtc_create - create crtc for mixer
+ * @mixer: xilinx video mixer object
+ *
+ * Return:
+ * Zero on success, error on failure
+ *
+ */
+static int xlnx_mix_crtc_create(struct xlnx_mix *mixer)
+{
+ struct xlnx_crtc *crtc;
+ struct drm_plane *primary_plane = NULL;
+ struct drm_plane *cursor_plane = NULL;
+ int ret, i;
+
+ crtc = &mixer->crtc;
+ primary_plane = &mixer->drm_primary_layer->base;
+ cursor_plane = &mixer->hw_logo_layer->base;
+
+ for (i = 0; i < mixer->num_planes; i++)
+ xlnx_mix_attach_plane_prop(&mixer->planes[i]);
+ mixer->pixel_clock = devm_clk_get(mixer->drm->dev, NULL);
+ if (IS_ERR(mixer->pixel_clock)) {
+ DRM_DEBUG_KMS("failed to get pixel clock\n");
+ mixer->pixel_clock = NULL;
+ }
+ ret = clk_prepare_enable(mixer->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ mixer->pixel_clock_enabled = false;
+ goto err_plane;
+ }
+ mixer->pixel_clock_enabled = true;
+ /* initialize drm crtc */
+	ret = drm_crtc_init_with_planes(mixer->drm, &crtc->crtc,
+					primary_plane, cursor_plane,
+					&xlnx_mix_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize mixer crtc\n");
+ goto err_pixel_clk;
+ }
+ drm_crtc_helper_add(&crtc->crtc, &xlnx_mix_crtc_helper_funcs);
+ crtc->get_max_width = &xlnx_mix_crtc_get_max_width;
+ crtc->get_max_height = &xlnx_mix_crtc_get_max_height;
+ crtc->get_align = &xlnx_mix_crtc_get_align;
+ crtc->get_format = &xlnx_mix_crtc_get_format;
+ crtc->get_cursor_height = &xlnx_mix_crtc_get_max_cursor_height;
+ crtc->get_cursor_width = &xlnx_mix_crtc_get_max_cursor_width;
+ xlnx_crtc_register(mixer->drm, crtc);
+
+ return 0;
+
+err_pixel_clk:
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+err_plane:
+ return ret;
+}
+
+/**
+ * xlnx_mix_init - Establishes a default power-on state for the mixer IP
+ * core
+ * @mixer: instance of IP core to initialize to a default state
+ *
+ * Background layer initialized to maximum height and width settings based on
+ * device tree properties and all overlay layers set to minimum height and width
+ * sizes and positioned to 0,0 in the crtc. All layers are inactive (resulting
+ * in video output being generated by the background color generator).
+ * Interrupts are disabled and the IP is started (with auto-restart enabled).
+ */
+static void xlnx_mix_init(struct xlnx_mix_hw *mixer)
+{
+ u32 i;
+ u32 bg_bpc = mixer->bg_layer_bpc;
+ u64 rgb_bg_clr = (0xFFFF >> (XVMIX_MAX_BPC - bg_bpc)) << (bg_bpc * 2);
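+	/* e.g. bg_bpc = 8: (0xFFFF >> 8) << 16 = 0xFF0000, i.e. full blue */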
+ enum xlnx_mix_layer_id layer_id;
+ struct xlnx_mix_layer_data *layer_data;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+ xlnx_mix_layer_disable(mixer, mixer->max_layers);
+ xlnx_mix_set_active_area(mixer, layer_data->hw_config.max_width,
+ layer_data->hw_config.max_height);
+ /* default to blue */
+ xlnx_mix_set_bkg_col(mixer, rgb_bg_clr);
+
+ for (i = 0; i < mixer->layer_cnt; i++) {
+ layer_id = mixer->layer_data[i].id;
+ layer_data = &mixer->layer_data[i];
+ if (layer_id == XVMIX_LAYER_MASTER)
+ continue;
+ xlnx_mix_set_layer_window(mixer, layer_id, 0, 0,
+ XVMIX_LAYER_WIDTH_MIN,
+ XVMIX_LAYER_HEIGHT_MIN, 0);
+ if (layer_data->hw_config.can_scale)
+ xlnx_mix_set_layer_scaling(mixer, layer_id, 0);
+ if (layer_data->hw_config.can_alpha)
+ xlnx_mix_set_layer_alpha(mixer, layer_id,
+ XVMIX_ALPHA_MAX);
+ }
+ xlnx_mix_intrpt_enable_done(mixer);
+}
+
+static int xlnx_mix_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_mix *mixer = dev_get_drvdata(dev);
+ struct drm_device *drm = data;
+	int ret;
+
+ mixer->drm = drm;
+ ret = xlnx_mix_plane_create(dev, mixer);
+ if (ret)
+ return ret;
+ ret = xlnx_mix_crtc_create(mixer);
+ if (ret)
+ return ret;
+ xlnx_mix_init(&mixer->mixer_hw);
+
+ return ret;
+}
+
+static void xlnx_mix_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_mix *mixer = dev_get_drvdata(dev);
+
+ dev_set_drvdata(dev, NULL);
+ xlnx_mix_intrpt_disable(&mixer->mixer_hw);
+ xlnx_crtc_unregister(mixer->drm, &mixer->crtc);
+}
+
+static const struct component_ops xlnx_mix_component_ops = {
+ .bind = xlnx_mix_bind,
+ .unbind = xlnx_mix_unbind,
+};
+
+static int xlnx_mix_probe(struct platform_device *pdev)
+{
+ struct xlnx_mix *mixer;
+ int ret;
+
+ mixer = devm_kzalloc(&pdev->dev, sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return -ENOMEM;
+
+ /* Sub-driver will access mixer from drvdata */
+ platform_set_drvdata(pdev, mixer);
+ ret = xlnx_mix_dt_parse(&pdev->dev, mixer);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to probe mixer\n");
+ return ret;
+ }
+
+ ret = component_add(&pdev->dev, &xlnx_mix_component_ops);
+ if (ret)
+ goto err;
+
+ mixer->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(mixer->master)) {
+ dev_err(&pdev->dev, "Failed to initialize the drm pipeline\n");
+ goto err_component;
+ }
+
+	dev_info(&pdev->dev, "Xilinx mixer driver probed successfully\n");
+ return ret;
+
+err_component:
+ component_del(&pdev->dev, &xlnx_mix_component_ops);
+err:
+ return ret;
+}
+
+static int xlnx_mix_remove(struct platform_device *pdev)
+{
+ struct xlnx_mix *mixer = platform_get_drvdata(pdev);
+
+ of_xlnx_bridge_put(mixer->vtc_bridge);
+ xlnx_drm_pipeline_exit(mixer->master);
+ component_del(&pdev->dev, &xlnx_mix_component_ops);
+ return 0;
+}
+
+/*
+ * TODO:
+ * In Mixer IP core version 4.0, the layer enable bits and logo layer offsets
+ * have changed. For backward compatibility, the maximum-layers field is
+ * used to differentiate IP versions. This logic should be reworked to key
+ * off the IP core version directly.
+ */
+
+static const struct of_device_id xlnx_mix_of_match[] = {
+ { .compatible = "xlnx,mixer-3.0", },
+ { .compatible = "xlnx,mixer-4.0", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xlnx_mix_of_match);
+
+static struct platform_driver xlnx_mix_driver = {
+ .probe = xlnx_mix_probe,
+ .remove = xlnx_mix_remove,
+ .driver = {
+ .name = "xlnx-mixer",
+ .of_match_table = xlnx_mix_of_match,
+ },
+};
+
+module_platform_driver(xlnx_mix_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx Mixer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_pl_disp.c b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
new file mode 100644
index 000000000000..a4de9b31a717
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM CRTC DMA engine driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * This driver supports a display pipeline backed by a DMA engine
+ * driver by initializing DRM crtc and plane objects. The driver assumes
+ * a single-plane pipeline, as a multi-plane pipeline would require
+ * programming beyond the DMA engine interface.
+ */
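+
+/*
+ * An illustrative device-tree node matching the properties parsed in
+ * probe below (node and phandle names are examples only):
+ *
+ *	drm-pl-disp {
+ *		compatible = "xlnx,pl-disp";
+ *		dmas = <&v_frmbuf_rd_0 0>;
+ *		dma-names = "dma0";
+ *		xlnx,vformat = "YUYV";
+ *		xlnx,bridge = <&v_tc_0>;
+ *	};
+ */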
+
+/**
+ * struct xlnx_dma_chan - struct for DMA engine
+ * @dma_chan: DMA channel
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template
+ */
+struct xlnx_dma_chan {
+ struct dma_chan *dma_chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+/**
+ * struct xlnx_pl_disp - struct for display subsystem
+ * @dev: device structure
+ * @master: logical master device from xlnx drm
+ * @xlnx_crtc: Xilinx DRM driver crtc object
+ * @plane: base drm plane object
+ * @chan: struct for DMA engine
+ * @event: vblank pending event
+ * @callback: callback for registering DMA callback function
+ * @callback_param: parameter for passing to DMA callback function
+ * @drm: core drm object
+ * @fmt: drm color format
+ * @vtc_bridge: vtc_bridge structure
+ * @fid: field id
+ */
+struct xlnx_pl_disp {
+ struct device *dev;
+ struct platform_device *master;
+ struct xlnx_crtc xlnx_crtc;
+ struct drm_plane plane;
+ struct xlnx_dma_chan *chan;
+ struct drm_pending_vblank_event *event;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ struct drm_device *drm;
+ u32 fmt;
+ struct xlnx_bridge *vtc_bridge;
+ u32 fid;
+};
+
+/*
+ * Xlnx crtc functions
+ */
+static inline struct xlnx_pl_disp *crtc_to_dma(struct xlnx_crtc *xlnx_crtc)
+{
+ return container_of(xlnx_crtc, struct xlnx_pl_disp, xlnx_crtc);
+}
+
+/**
+ * xlnx_pl_disp_complete - vblank handler
+ * @param: parameter to vblank handler
+ *
+ * This function handles the vblank interrupt and sends the pending event
+ * to the CRTC object.
+ */
+static void xlnx_pl_disp_complete(void *param)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = param;
+ struct drm_device *drm = xlnx_pl_disp->drm;
+
+ drm_handle_vblank(drm, 0);
+}
+
+/**
+ * xlnx_pl_disp_get_format - Get the current display pipeline format
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_pl_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ return xlnx_pl_disp->fmt;
+}
+
+/**
+ * xlnx_pl_disp_get_align - Get the alignment value for pitch
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value for the pitch, in bytes.
+ */
+static unsigned int xlnx_pl_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
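+	/* copy_align holds the log2 of the required alignment; convert to bytes */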
+ return 1 << xlnx_pl_disp->chan->dma_chan->device->copy_align;
+}
+
+/*
+ * DRM plane functions
+ */
+static inline struct xlnx_pl_disp *plane_to_dma(struct drm_plane *plane)
+{
+ return container_of(plane, struct xlnx_pl_disp, plane);
+}
+
+/**
+ * xlnx_pl_disp_plane_disable - Disables DRM plane
+ * @plane: DRM plane object
+ *
+ * Disable the DRM plane by stopping the corresponding DMA
+ */
+static void xlnx_pl_disp_plane_disable(struct drm_plane *plane)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+ dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+}
+
+/**
+ * xlnx_pl_disp_plane_enable - Enables DRM plane
+ * @plane: DRM plane object
+ *
+ * Enable the DRM plane, by enabling the corresponding DMA
+ */
+static void xlnx_pl_disp_plane_enable(struct drm_plane *plane)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+ struct dma_chan *dma_chan = xlnx_dma_chan->dma_chan;
+ struct dma_interleaved_template *xt = &xlnx_dma_chan->xt;
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma_chan, xt, flags);
+ if (!desc) {
+ dev_err(xlnx_pl_disp->dev,
+ "failed to prepare DMA descriptor\n");
+ return;
+ }
+ desc->callback = xlnx_pl_disp->callback;
+ desc->callback_param = xlnx_pl_disp->callback_param;
+ xilinx_xdma_set_earlycb(xlnx_dma_chan->dma_chan, desc, EARLY_CALLBACK);
+
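+	/*
+	 * For framebuffers flagged DRM_MODE_FB_ALTERNATE_TOP/BOTTOM, tell
+	 * the DMA engine which field this frame carries (1 = top field,
+	 * 0 = bottom field).
+	 */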
+ if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP ||
+ plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_BOTTOM) {
+ if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP)
+ xlnx_pl_disp->fid = 1;
+ else
+ xlnx_pl_disp->fid = 0;
+
+ xilinx_xdma_set_fid(xlnx_dma_chan->dma_chan, desc,
+ xlnx_pl_disp->fid);
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(xlnx_dma_chan->dma_chan);
+}
+
+static void xlnx_pl_disp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ xlnx_pl_disp_plane_disable(plane);
+}
+
+static int xlnx_pl_disp_plane_mode_set(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+				       u32 src_x, u32 src_y,
+				       u32 src_w, u32 src_h)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+ const struct drm_format_info *info = fb->format;
+ dma_addr_t luma_paddr, chroma_paddr;
+ size_t stride;
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+ if (info->num_planes > 2) {
+ dev_err(xlnx_pl_disp->dev, "Color format not supported\n");
+ return -EINVAL;
+ }
+ luma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+ if (!luma_paddr) {
+ dev_err(xlnx_pl_disp->dev, "failed to get luma paddr\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(xlnx_pl_disp->dev, "num planes = %d\n", info->num_planes);
+ xlnx_dma_chan->xt.numf = src_h;
+ xlnx_dma_chan->sgl[0].size = drm_format_plane_width_bytes(info,
+ 0, src_w);
+ xlnx_dma_chan->sgl[0].icg = fb->pitches[0] - xlnx_dma_chan->sgl[0].size;
+ xlnx_dma_chan->xt.src_start = luma_paddr;
+ xlnx_dma_chan->xt.frame_size = info->num_planes;
+ xlnx_dma_chan->xt.dir = DMA_MEM_TO_DEV;
+ xlnx_dma_chan->xt.src_sgl = true;
+ xlnx_dma_chan->xt.dst_sgl = false;
+
+	/*
+	 * Do we have a video format aware dma channel? If so, modify the
+	 * descriptor accordingly. Heuristic test: we have a multi-plane
+	 * format but only one dma channel.
+	 */
+ if (info->num_planes > 1) {
+ chroma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 1);
+ if (!chroma_paddr) {
+ dev_err(xlnx_pl_disp->dev,
+ "failed to get chroma paddr\n");
+ return -EINVAL;
+ }
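+		/*
+		 * Program the gap from the end of the luma plane to the
+		 * start of the chroma plane: e.g. for a contiguous NV12
+		 * buffer the chroma data begins right after numf lines of
+		 * luma, so src_icg works out to 0.
+		 */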
+ stride = xlnx_dma_chan->sgl[0].size +
+ xlnx_dma_chan->sgl[0].icg;
+ xlnx_dma_chan->sgl[0].src_icg = chroma_paddr -
+ xlnx_dma_chan->xt.src_start -
+ (xlnx_dma_chan->xt.numf * stride);
+ }
+
+ return 0;
+}
+
+static void xlnx_pl_disp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+ struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+
+ ret = xlnx_pl_disp_plane_mode_set(plane,
+ plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret) {
+ dev_err(xlnx_pl_disp->dev, "failed to mode set a plane\n");
+ return;
+ }
+	/* if the frame buffer DMA is used, set the color format */
+ xilinx_xdma_drm_config(xlnx_pl_disp->chan->dma_chan,
+ xlnx_pl_disp->plane.state->fb->format->format);
+ /* apply the new fb addr and enable */
+ xlnx_pl_disp_plane_enable(plane);
+}
+
+static int
+xlnx_pl_disp_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_atomic_state *state = new_plane_state->state;
+ const struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+ const struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ /* plane must be enabled when state is active */
+ if (new_crtc_state->active && !new_plane_state->crtc)
+ return -EINVAL;
+
+ /*
+ * This check is required to call modeset if there is a change in color
+ * format
+ */
+ if (new_plane_state->fb && old_plane_state->fb &&
+ new_plane_state->fb->format->format !=
+ old_plane_state->fb->format->format)
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
+static const struct drm_plane_helper_funcs xlnx_pl_disp_plane_helper_funcs = {
+ .atomic_update = xlnx_pl_disp_plane_atomic_update,
+ .atomic_disable = xlnx_pl_disp_plane_atomic_disable,
+ .atomic_check = xlnx_pl_disp_plane_atomic_check,
+};
+
+static struct drm_plane_funcs xlnx_pl_disp_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct xlnx_pl_disp *drm_crtc_to_dma(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+
+ return crtc_to_dma(xlnx_crtc);
+}
+
+static void xlnx_pl_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ drm_crtc_vblank_on(crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static void xlnx_pl_disp_clear_event(struct drm_crtc *crtc)
+{
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+}
+
+static void xlnx_pl_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ int vrefresh;
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+ struct videomode vm;
+
+ if (xlnx_pl_disp->vtc_bridge) {
+ /* set video timing */
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+ xlnx_bridge_set_timing(xlnx_pl_disp->vtc_bridge, &vm);
+ xlnx_bridge_enable(xlnx_pl_disp->vtc_bridge);
+ }
+
+ xlnx_pl_disp_plane_enable(crtc->primary);
+
+ /* Delay of 1 vblank interval for timing gen to be stable */
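+	/* e.g. 1080p60: 148500 * 1000 / (2200 * 1125) = 60 Hz -> ~16 ms */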
+ vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+ msleep(1 * 1000 / vrefresh);
+}
+
+static void xlnx_pl_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ xlnx_pl_disp_plane_disable(crtc->primary);
+ xlnx_pl_disp_clear_event(crtc);
+ drm_crtc_vblank_off(crtc);
+ xlnx_bridge_disable(xlnx_pl_disp->vtc_bridge);
+}
+
+static int xlnx_pl_disp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static struct drm_crtc_helper_funcs xlnx_pl_disp_crtc_helper_funcs = {
+ .atomic_enable = xlnx_pl_disp_crtc_atomic_enable,
+ .atomic_disable = xlnx_pl_disp_crtc_atomic_disable,
+ .atomic_check = xlnx_pl_disp_crtc_atomic_check,
+ .atomic_begin = xlnx_pl_disp_crtc_atomic_begin,
+};
+
+static void xlnx_pl_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+ xlnx_pl_disp_plane_disable(crtc->primary);
+ drm_crtc_cleanup(crtc);
+}
+
+static int xlnx_pl_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ /*
+ * Use the complete callback for vblank event assuming the dma engine
+	 * starts on the next descriptor upon this event. This may not be a
+	 * safe assumption for some dma engines.
+ */
+ xlnx_pl_disp->callback = xlnx_pl_disp_complete;
+ xlnx_pl_disp->callback_param = xlnx_pl_disp;
+
+ return 0;
+}
+
+static void xlnx_pl_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+ struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+ xlnx_pl_disp->callback = NULL;
+ xlnx_pl_disp->callback_param = NULL;
+}
+
+static struct drm_crtc_funcs xlnx_pl_disp_crtc_funcs = {
+ .destroy = xlnx_pl_disp_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = xlnx_pl_disp_crtc_enable_vblank,
+ .disable_vblank = xlnx_pl_disp_crtc_disable_vblank,
+};
+
+static int xlnx_pl_disp_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+ int ret;
+ u32 *fmts = NULL;
+ unsigned int num_fmts = 0;
+
+	/* in case of fb IP, query the supported formats and their count */
+ xilinx_xdma_get_drm_vid_fmts(xlnx_pl_disp->chan->dma_chan,
+ &num_fmts, &fmts);
+ ret = drm_universal_plane_init(drm, &xlnx_pl_disp->plane, 0,
+ &xlnx_pl_disp_plane_funcs,
+ fmts ? fmts : &xlnx_pl_disp->fmt,
+ num_fmts ? num_fmts : 1,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(&xlnx_pl_disp->plane,
+ &xlnx_pl_disp_plane_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &xlnx_pl_disp->xlnx_crtc.crtc,
+ &xlnx_pl_disp->plane, NULL,
+ &xlnx_pl_disp_crtc_funcs, NULL);
+ if (ret) {
+ drm_plane_cleanup(&xlnx_pl_disp->plane);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&xlnx_pl_disp->xlnx_crtc.crtc,
+ &xlnx_pl_disp_crtc_helper_funcs);
+ xlnx_pl_disp->xlnx_crtc.get_format = &xlnx_pl_disp_get_format;
+ xlnx_pl_disp->xlnx_crtc.get_align = &xlnx_pl_disp_get_align;
+ xlnx_pl_disp->drm = drm;
+ xlnx_crtc_register(xlnx_pl_disp->drm, &xlnx_pl_disp->xlnx_crtc);
+
+ return 0;
+}
+
+static void xlnx_pl_disp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+
+ drm_plane_cleanup(&xlnx_pl_disp->plane);
+ drm_crtc_cleanup(&xlnx_pl_disp->xlnx_crtc.crtc);
+}
+
+static const struct component_ops xlnx_pl_disp_component_ops = {
+ .bind = xlnx_pl_disp_bind,
+ .unbind = xlnx_pl_disp_unbind,
+};
+
+static int xlnx_pl_disp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vtc_node;
+ struct xlnx_pl_disp *xlnx_pl_disp;
+ int ret;
+ const char *vformat;
+ struct dma_chan *dma_chan;
+ struct xlnx_dma_chan *xlnx_dma_chan;
+
+ xlnx_pl_disp = devm_kzalloc(dev, sizeof(*xlnx_pl_disp), GFP_KERNEL);
+ if (!xlnx_pl_disp)
+ return -ENOMEM;
+
+ dma_chan = of_dma_request_slave_channel(dev->of_node, "dma0");
+	if (IS_ERR_OR_NULL(dma_chan)) {
+		dev_err(dev, "failed to request dma channel\n");
+		/* a NULL channel would make PTR_ERR() return 0 (success) */
+		return dma_chan ? PTR_ERR(dma_chan) : -ENODEV;
+	}
+
+ xlnx_dma_chan = devm_kzalloc(dev, sizeof(*xlnx_dma_chan), GFP_KERNEL);
+ if (!xlnx_dma_chan)
+ return -ENOMEM;
+
+ xlnx_dma_chan->dma_chan = dma_chan;
+ xlnx_pl_disp->chan = xlnx_dma_chan;
+ ret = of_property_read_string(dev->of_node, "xlnx,vformat", &vformat);
+ if (ret) {
+ dev_err(dev, "No xlnx,vformat value in dts\n");
+ goto err_dma;
+ }
+
+	/* xlnx,vformat holds a fourcc code; copy at most 4 bytes into fmt */
+	memcpy(&xlnx_pl_disp->fmt, vformat,
+	       min_t(size_t, strlen(vformat), sizeof(xlnx_pl_disp->fmt)));
+
+ /* VTC Bridge support */
+ vtc_node = of_parse_phandle(dev->of_node, "xlnx,bridge", 0);
+ if (vtc_node) {
+ xlnx_pl_disp->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+		if (!xlnx_pl_disp->vtc_bridge) {
+			dev_info(dev, "Didn't get vtc bridge instance\n");
+			/* release the dma channel before deferring probe */
+			ret = -EPROBE_DEFER;
+			goto err_dma;
+		}
+ } else {
+ dev_info(dev, "vtc bridge property not present\n");
+ }
+
+ xlnx_pl_disp->dev = dev;
+ platform_set_drvdata(pdev, xlnx_pl_disp);
+
+ ret = component_add(dev, &xlnx_pl_disp_component_ops);
+ if (ret)
+ goto err_dma;
+
+ xlnx_pl_disp->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(xlnx_pl_disp->master)) {
+ ret = PTR_ERR(xlnx_pl_disp->master);
+ dev_err(dev, "failed to initialize the drm pipeline\n");
+ goto err_component;
+ }
+
+ dev_info(&pdev->dev, "Xlnx PL display driver probed\n");
+
+ return 0;
+
+err_component:
+ component_del(dev, &xlnx_pl_disp_component_ops);
+err_dma:
+ dma_release_channel(xlnx_pl_disp->chan->dma_chan);
+
+ return ret;
+}
+
+static int xlnx_pl_disp_remove(struct platform_device *pdev)
+{
+ struct xlnx_pl_disp *xlnx_pl_disp = platform_get_drvdata(pdev);
+ struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+ of_xlnx_bridge_put(xlnx_pl_disp->vtc_bridge);
+ xlnx_drm_pipeline_exit(xlnx_pl_disp->master);
+ component_del(&pdev->dev, &xlnx_pl_disp_component_ops);
+
+ /* Make sure the channel is terminated before release */
+ dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+ dma_release_channel(xlnx_dma_chan->dma_chan);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_pl_disp_of_match[] = {
+ { .compatible = "xlnx,pl-disp"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_pl_disp_of_match);
+
+static struct platform_driver xlnx_pl_disp_driver = {
+ .probe = xlnx_pl_disp_probe,
+ .remove = xlnx_pl_disp_remove,
+ .driver = {
+ .name = "xlnx-pl-disp",
+ .of_match_table = xlnx_pl_disp_of_match,
+ },
+};
+
+module_platform_driver(xlnx_pl_disp_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx DRM Display Driver for PL IPs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_scaler.c b/drivers/gpu/drm/xlnx/xlnx_scaler.c
new file mode 100644
index 000000000000..9d20671c8c83
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_scaler.c
@@ -0,0 +1,1748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS SCALER DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ * Rohit Athavale <rathavale@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from the V4L2 driver.
+ * TODO:
+ * Rework into a modular approach to share driver code between the
+ * V4L2 and DRM frameworks.
+ * Should be integrated with the plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+#define XSCALER_MAX_WIDTH (3840)
+#define XSCALER_MAX_HEIGHT (2160)
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MIN_HEIGHT (64)
+
+/* Video subsystem block offsets */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+#define STEP_PRECISION (65536)
+
+/* SCALER POWER MACROS */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Video IP PPC */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* Scaler AP Control Registers */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
+
+/* H-scaler registers */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA	(0x0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xhsc_coeff_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xhsc_coeff_taps8[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xhsc_coeff_taps10[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xhsc_coeff_taps12[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xvsc_coeff_taps6[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xvsc_coeff_taps8[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xvsc_coeff_taps10[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xvsc_coeff_taps12[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+enum xilinx_scaler_vid_reg_fmts {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+static const u32 xilinx_scaler_video_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/**
+ * struct xilinx_scaler - Core configuration of scaler device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @width_in: input width
+ * @height_in: input height
+ * @width_out: output width
+ * @height_out: output height
+ * @fmt_in: input format
+ * @fmt_out: output format
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @pix_per_clk: Pixels per clock cycle the IP operates on
+ * @max_pixels: The maximum number of pixels that the H-scaler examines
+ * @max_lines: The maximum number of lines that the V-scaler examines
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @hscaler_coeff: The complete array of H-scaler coefficients
+ * @vscaler_coeff: The complete array of V-scaler coefficients
+ * @is_polyphase: Track if scaling algorithm is polyphase or not
+ * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ * @ctrl_clk: AXI Lite clock
+ * @axis_clk: Video Clock
+ */
+struct xilinx_scaler {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ u32 width_in;
+ u32 height_in;
+ u32 width_out;
+ u32 height_out;
+ u32 fmt_in;
+ u32 fmt_out;
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ u32 pix_per_clk;
+ u32 max_pixels;
+ u32 max_lines;
+ u32 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
+ short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
+ short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
+ bool is_polyphase;
+ struct gpio_desc *rst_gpio;
+ struct clk *ctrl_clk;
+ struct clk *axis_clk;
+};
+
+static inline void xilinx_scaler_write(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xilinx_scaler_read(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void xilinx_scaler_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ xilinx_scaler_write(base, offset,
+ xilinx_scaler_read(base, offset) & ~clr);
+}
+
+static inline void xilinx_scaler_set(void __iomem *base, u32 offset, u32 set)
+{
+ xilinx_scaler_write(base, offset,
+ xilinx_scaler_read(base, offset) | set);
+}
+
+static inline void
+xilinx_scaler_disable_block(struct xilinx_scaler *scaler, u32 channel,
+ u32 ip_block)
+{
+ xilinx_scaler_clr(scaler->base, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF, ip_block);
+}
+
+static inline void
+xilinx_scaler_enable_block(struct xilinx_scaler *scaler, u32 channel,
+ u32 ip_block)
+{
+ xilinx_scaler_set(scaler->base, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF, ip_block);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the member.
+ *
+ * Return: parent structure pointer
+ */
+static inline struct xilinx_scaler *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xilinx_scaler, bridge);
+}
+
+/**
+ * xilinx_scaler_reset - Resets the scaler block
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function resets the scaler block.
+ */
+static void xilinx_scaler_reset(struct xilinx_scaler *scaler)
+{
+ xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+ xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+}
+
+/**
+ * xv_hscaler_calculate_phases - Calculates h-scaler phases
+ * @scaler: Pointer to scaler device structure
+ * @width_in: input width
+ * @width_out: output width
+ * @pixel_rate: pixel rate
+ */
+static void
+xv_hscaler_calculate_phases(struct xilinx_scaler *scaler,
+ u32 width_in, u32 width_out, u32 pixel_rate)
+{
+ unsigned int loop_width;
+ unsigned int x, s;
+ int offset = 0;
+ int xwrite_pos = 0;
+ bool output_write_en;
+ bool get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+ int nr_rds = 0;
+ int nr_rds_clck;
+ unsigned int nphases = scaler->max_num_phases;
+ unsigned int nppc = scaler->pix_per_clk;
+ unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
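+ /*
+ * Walk the line with a fixed-point step accumulator: 'offset' gains
+ * pixel_rate for every output pixel written and loses one full step
+ * (1 << XHSC_STEP_PRECISION_SHIFT) for every input pixel consumed; the
+ * top bits of the fractional remainder select the filter phase.
+ */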
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+ get_new_pix = true;
+ offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+ xwrite_pos < width_out) {
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
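+ /*
+ * Pack the per-lane control word into H_phases[x]: the filter
+ * phase in the low bits of each lane's field, the input pixel
+ * index above bit 6 and an output-write-enable flag, with one
+ * field per pixel-per-clock lane 's'.
+ */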
+ scaler->H_phases[x] |= (phaseH <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ scaler->H_phases[x] |= (array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MULTIPLIER)));
+ if (output_write_en) {
+ scaler->H_phases[x] |=
+ (XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+/**
+ * xv_hscaler_load_ext_coeff - Loads external coefficients of h-scaler
+ * @scaler: Pointer to scaler device structure
+ * @coeff: Pointer to coeff array
+ * @ntaps: number of taps
+ *
+ * This function loads h-scaler coefficients.
+ */
+static void
+xv_hscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ u32 nphases = scaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_HSCALER_MAX_H_TAPS - ntaps;
+ offset = pad >> 1;
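+ /*
+ * Example: ntaps = 6 in the 12-tap table gives pad = 6 and offset = 3,
+ * so each phase keeps zeros in entries 0..2 and 9..11 with the six
+ * coefficients centred in entries 3..8.
+ */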
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ scaler->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) {
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < offset; j++)
+ scaler->hscaler_coeff[i][j] = 0;
+ j = ntaps + offset;
+ for (; j < XV_HSCALER_MAX_H_TAPS; j++)
+ scaler->hscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_hscaler_coeff_select - Selection of H-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @width_in: Width of input video
+ * @width_out: Width of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 12-tap
+ * filter may operate with 10 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal video
+ * output, as determined by repeated testing of the IP.
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * H-scaler number of taps.
+ */
+static int
+xv_hscaler_select_coeff(struct xilinx_scaler *scaler,
+ u32 width_in, u32 width_out)
+{
+ const short *coeff;
+ u16 hscale_ratio;
+ u32 ntaps = scaler->num_hori_taps;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+ if (width_out < width_in) {
+ hscale_ratio = ((width_in * 10) / width_out);
+
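+ /*
+ * hscale_ratio holds the downscale factor in tenths: e.g. scaling
+ * 1920 down to 960 gives (1920 * 10) / 960 = 20, a 2.0x downscale,
+ * which selects the 8-tap table on a 12-tap capable instance below.
+ */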
+ switch (scaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_6:
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ break;
+ case XV_HSCALER_TAPS_8:
+ if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_10:
+ if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_12:
+ if (hscale_ratio > 35) {
+ coeff = &xhsc_coeff_taps12[0][0];
+ ntaps = XV_HSCALER_TAPS_12;
+ } else if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ default:
+ dev_info(scaler->dev, "Unsupported H-scaler number of taps\n");
+ return -EINVAL;
+ }
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ xv_hscaler_load_ext_coeff(scaler, coeff, ntaps);
+ return 0;
+}
+
+/**
+ * xv_hscaler_set_coeff - Sets h-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets coefficients of h-scaler.
+ */
+static void xv_hscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+ int val, i, j, offset, rd_indx;
+ u32 ntaps = scaler->num_hori_taps;
+ u32 nphases = scaler->max_num_phases;
+ u32 base_addr;
+
+ offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+ base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
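+ /*
+ * Each 32-bit register write packs two adjacent signed 16-bit
+ * coefficients: coeff[rd_indx] in the low half-word and
+ * coeff[rd_indx + 1] in the high half-word.
+ */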
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (scaler->hscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (scaler->hscaler_coeff[i][rd_indx] &
+ XHSC_MASK_LOW_16BITS);
+ xilinx_scaler_write(scaler->base, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_load_ext_coeff - Loads external coefficients of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @coeff: Pointer to coeff array
+ * @ntaps: number of taps
+ *
+ * This function loads v-scaler coefficients.
+ */
+static void
+xv_vscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+ const short *coeff, u32 ntaps)
+{
+ int i, j, pad, offset;
+ u32 nphases = scaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+ offset = pad >> 1;
+ /* Load User defined coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ scaler->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+ if (pad) {
+ /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ scaler->vscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_VSCALER_MAX_V_TAPS; j++)
+ scaler->vscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_vscaler_set_coeff - Sets v-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets coefficients of v-scaler.
+ */
+static void xv_vscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+ u32 nphases = scaler->max_num_phases;
+ u32 ntaps = scaler->num_vert_taps;
+ int val, i, j, offset, rd_indx;
+ u32 base_addr;
+
+ offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+ base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (scaler->vscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (scaler->vscaler_coeff[i][rd_indx] &
+ XVSC_MASK_LOW_16BITS);
+ xilinx_scaler_write(scaler->base, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_coeff_select - Selection of V-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 10-tap
+ * filter may operate with 6 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal video
+ * output, as determined by repeated testing of the IP.
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * V-scaler number of taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xilinx_scaler *scaler,
+ u32 height_in, u32 height_out)
+{
+ const short *coeff;
+ u16 vscale_ratio;
+ u32 ntaps = scaler->num_vert_taps;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+
+ if (height_out < height_in) {
+ vscale_ratio = ((height_in * 10) / height_out);
+
+ switch (scaler->num_vert_taps) {
+ case XV_VSCALER_TAPS_6:
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ break;
+ case XV_VSCALER_TAPS_8:
+ if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_10:
+ if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_12:
+ if (vscale_ratio > 35) {
+ coeff = &xvsc_coeff_taps12[0][0];
+ ntaps = XV_VSCALER_TAPS_12;
+ } else if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+
+ xv_vscaler_load_ext_coeff(scaler, coeff, ntaps);
+ return 0;
+}
+
+/**
+ * xv_hscaler_set_phases - Sets phases of h-scaler
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets phases of h-scaler.
+ */
+static void
+xv_hscaler_set_phases(struct xilinx_scaler *scaler)
+{
+ u32 loop_width;
+ u32 index, val;
+ u32 offset, i, lsb, msb;
+
+ loop_width = scaler->max_pixels / scaler->pix_per_clk;
+ offset = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+
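+ /*
+ * With one pixel per clock each H_phases entry is 16 bits wide, so
+ * two entries are packed per 32-bit register write; with two pixels
+ * per clock each entry already fills 32 bits and is written directly.
+ */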
+ switch (scaler->pix_per_clk) {
+ case XSCALER_PPC_1:
+ index = 0;
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = scaler->H_phases[i] & XHSC_MASK_LOW_16BITS;
+ msb = scaler->H_phases[i + 1] & XHSC_MASK_LOW_16BITS;
+ val = (msb << 16 | lsb);
+ xilinx_scaler_write(scaler->base, offset +
+ (index * 4), val);
+ ++index;
+ }
+ return;
+ case XSCALER_PPC_2:
+ for (i = 0; i < loop_width; i++) {
+ val = (scaler->H_phases[i] & XHSC_MASK_LOW_32BITS);
+ xilinx_scaler_write(scaler->base, offset +
+ (i * 4), val);
+ }
+ return;
+ }
+}
+
+/**
+ * xv_vscaler_setup_video_fmt - Sets video format of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @code_in: format to be set
+ *
+ * This function sets the given format of the v-scaler.
+ *
+ * Return: format value on success. -EINVAL for invalid format.
+ *
+ */
+static int
+xv_vscaler_setup_video_fmt(struct xilinx_scaler *scaler, u32 code_in)
+{
+ u32 video_in;
+
+ switch (code_in) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ video_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ video_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ video_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ video_in = XVIDC_CSF_YCRCB_420;
+ break;
+ default:
+ dev_info(scaler->dev, "Vscaler Unsupported media fmt\n");
+ return -EINVAL;
+ }
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ video_in);
+ return video_in;
+}
+
+/**
+ * xv_hscaler_setup_video_fmt - Sets video format of h-scaler
+ * @scaler: Pointer to scaler device structure
+ * @code_out: bus format to be set
+ * @vsc_out: return value of vscaler
+ *
+ * This function sets the given video format of the h-scaler.
+ *
+ * Return: format value on success. -EINVAL for invalid format.
+ *
+ */
+static int xv_hscaler_setup_video_fmt(struct xilinx_scaler *scaler,
+ u32 code_out, u32 vsc_out)
+{
+ u32 video_out;
+
+ switch (vsc_out) {
+ case XVIDC_CSF_YCRCB_422:
+ case XVIDC_CSF_YCRCB_444:
+ case XVIDC_CSF_RGB:
+ case XVIDC_CSF_YCRCB_420:
+ break;
+ default:
+ dev_info(scaler->dev, "unsupported format from Vscaler\n");
+ return -EINVAL;
+ }
+
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ vsc_out);
+
+ switch (code_out) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ video_out = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ video_out = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ video_out = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ video_out = XVIDC_CSF_YCRCB_420;
+ break;
+ default:
+ dev_info(scaler->dev, "Hscaler Unsupported Out media fmt\n");
+ return -EINVAL;
+ }
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+ video_out);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_parse_of - Parse device tree information
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function reads the device tree contents
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ *
+ */
+static int xilinx_scaler_parse_of(struct xilinx_scaler *scaler)
+{
+ int ret;
+ u32 dt_ppc;
+ struct device_node *node = scaler->dev->of_node;
+
+ scaler->ctrl_clk = devm_clk_get(scaler->dev, "aclk_ctrl");
+ if (IS_ERR(scaler->ctrl_clk)) {
+ ret = PTR_ERR(scaler->ctrl_clk);
+ dev_err(scaler->dev, "failed to get axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ scaler->axis_clk = devm_clk_get(scaler->dev, "aclk_axis");
+ if (IS_ERR(scaler->axis_clk)) {
+ ret = PTR_ERR(scaler->axis_clk);
+ dev_err(scaler->dev, "failed to get video clk %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,h-scaler-taps",
+ &scaler->num_hori_taps);
+ if (ret < 0) {
+ dev_info(scaler->dev, "h-scaler-taps not present in DT\n");
+ return ret;
+ }
+ switch (scaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_HSCALER_TAPS_4:
+ scaler->is_polyphase = false;
+ break;
+ case XV_HSCALER_TAPS_6:
+ case XV_HSCALER_TAPS_8:
+ case XV_HSCALER_TAPS_10:
+ case XV_HSCALER_TAPS_12:
+ scaler->is_polyphase = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,v-scaler-taps",
+ &scaler->num_vert_taps);
+ if (ret < 0) {
+ dev_info(scaler->dev, "v-scaler-taps not present in DT\n");
+ return ret;
+ }
+
+ switch (scaler->num_vert_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_VSCALER_TAPS_4:
+ if (scaler->num_vert_taps != scaler->num_hori_taps)
+ return -EINVAL;
+ break;
+ case XV_VSCALER_TAPS_6:
+ case XV_VSCALER_TAPS_8:
+ case XV_VSCALER_TAPS_10:
+ case XV_VSCALER_TAPS_12:
+ scaler->is_polyphase = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,samples-per-clk", &dt_ppc);
+ if (ret < 0) {
+ dev_info(scaler->dev, "PPC is missing in DT\n");
+ return ret;
+ }
+ if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2) {
+ dev_info(scaler->dev, "Unsupported ppc: %d", dt_ppc);
+ return -EINVAL;
+ }
+ scaler->pix_per_clk = dt_ppc;
+
+ /* Reset GPIO */
+ scaler->rst_gpio = devm_gpiod_get(scaler->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(scaler->rst_gpio)) {
+ if (PTR_ERR(scaler->rst_gpio) != -EPROBE_DEFER)
+ dev_err(scaler->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(scaler->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &scaler->max_lines);
+ if (ret < 0) {
+ dev_err(scaler->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (scaler->max_lines > XSCALER_MAX_HEIGHT ||
+ scaler->max_lines < XSCALER_MIN_HEIGHT) {
+ dev_err(scaler->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &scaler->max_pixels);
+ if (ret < 0) {
+ dev_err(scaler->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (scaler->max_pixels > XSCALER_MAX_WIDTH ||
+ scaler->max_pixels < XSCALER_MIN_WIDTH) {
+ dev_err(scaler->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_scaler_stream - Set up v-scaler and h-scaler for streaming
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function sets up the required configuration of v-scaler and h-scaler
+ *
+ * Return: 0 on success. Returns -EINVAL on failure conditions.
+ */
+static int xilinx_scaler_stream(struct xilinx_scaler *scaler)
+{
+ u32 fmt_in, fmt_out;
+ u32 pixel_rate;
+ u32 line_rate;
+ int ret;
+
+ fmt_in = scaler->fmt_in;
+ fmt_out = scaler->fmt_out;
+ line_rate = (scaler->height_in * STEP_PRECISION) / scaler->height_out;
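+ /*
+ * line_rate is the vertical scaling ratio in STEP_PRECISION fixed
+ * point, e.g. height_in = 2160 and height_out = 1080 give
+ * 2 * STEP_PRECISION, a 2x downscale.
+ */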
+
+ if (scaler->is_polyphase) {
+ ret = xv_vscaler_select_coeff(scaler, scaler->height_in,
+ scaler->height_out);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: vscaler select coeff\n");
+ return ret;
+ }
+ xv_vscaler_set_coeff(scaler);
+ }
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA,
+ line_rate);
+ ret = xv_vscaler_setup_video_fmt(scaler, scaler->fmt_in);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: vscaler setup video format\n");
+ return ret;
+ }
+ pixel_rate = (scaler->width_in * STEP_PRECISION) / scaler->width_out;
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA,
+ pixel_rate);
+ ret = xv_hscaler_setup_video_fmt(scaler, scaler->fmt_out, ret);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: vscaler setup video format\n");
+ return ret;
+ }
+ if (scaler->is_polyphase) {
+ ret = xv_hscaler_select_coeff(scaler, scaler->width_in,
+ scaler->width_out);
+ if (ret < 0) {
+ dev_info(scaler->dev, "Failed: hscaler select coeff\n");
+ return ret;
+ }
+ xv_hscaler_set_coeff(scaler);
+ }
+ xv_hscaler_calculate_phases(scaler, scaler->width_in,
+ scaler->width_out, pixel_rate);
+ xv_hscaler_set_phases(scaler);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_enable - enables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * This function enables the scaler sub-cores.
+ *
+ * Return: 0 on success. Returns -EINVAL on failure conditions.
+ *
+ */
+static int xilinx_scaler_bridge_enable(struct xlnx_bridge *bridge)
+{
+ int ret;
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ ret = xilinx_scaler_stream(scaler);
+ if (ret)
+ return ret;
+
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+ return ret;
+}
+
+/**
+ * xilinx_scaler_bridge_disable - disables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * This function disables the scaler sub-cores
+ */
+static void xilinx_scaler_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+}
+
+/**
+ * xilinx_scaler_bridge_set_input - Sets the input parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the input parameters of scaler
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ if (width > scaler->max_pixels || height > scaler->max_lines)
+ return -EINVAL;
+
+ scaler->height_in = height;
+ scaler->width_in = width;
+ scaler->fmt_in = bus_fmt;
+
+ /* IP Reset through GPIO */
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_ASSERT);
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ xilinx_scaler_reset(scaler);
+ memset(scaler->H_phases, 0, sizeof(scaler->H_phases));
+
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height);
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width);
+
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_input_fmts - input formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the input video format information of the scaler.
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_scaler_video_fmts;
+ *count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_set_output - Sets the output parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of scaler
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ if (width > scaler->max_pixels || height > scaler->max_lines)
+ return -EINVAL;
+
+ scaler->height_out = height;
+ scaler->width_out = width;
+ scaler->fmt_out = bus_fmt;
+
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_output_fmts - output formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the output video format information of the scaler.
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_scaler_video_fmts;
+ *count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+ return 0;
+}
+
+static int xilinx_scaler_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_scaler *scaler;
+ int ret;
+
+ scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
+ if (!scaler)
+ return -ENOMEM;
+ scaler->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scaler->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scaler->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(scaler->base);
+ }
+ platform_set_drvdata(pdev, scaler);
+
+ ret = xilinx_scaler_parse_of(scaler);
+ if (ret < 0) {
+ dev_info(scaler->dev, "parse_of failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(scaler->ctrl_clk);
+ if (ret) {
+ dev_err(scaler->dev, "unable to enable axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(scaler->axis_clk);
+ if (ret) {
+ dev_err(scaler->dev, "unable to enable video clk %d\n", ret);
+ goto err_ctrl_clk;
+ }
+
+ scaler->max_num_phases = XSCALER_MAX_PHASES;
+
+ /* Release the global reset GPIO and reset the internal sub-cores */
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ xilinx_scaler_reset(scaler);
+
+ scaler->bridge.enable = &xilinx_scaler_bridge_enable;
+ scaler->bridge.disable = &xilinx_scaler_bridge_disable;
+ scaler->bridge.set_input = &xilinx_scaler_bridge_set_input;
+ scaler->bridge.get_input_fmts = &xilinx_scaler_bridge_get_input_fmts;
+ scaler->bridge.set_output = &xilinx_scaler_bridge_set_output;
+ scaler->bridge.get_output_fmts = &xilinx_scaler_bridge_get_output_fmts;
+ scaler->bridge.of_node = dev->of_node;
+
+ ret = xlnx_bridge_register(&scaler->bridge);
+ if (ret) {
+ dev_info(scaler->dev, "Bridge registration failed\n");
+ goto err_axis_clk;
+ }
+ dev_info(scaler->dev, "xlnx drm scaler experimental driver probed\n");
+
+ return 0;
+
+err_axis_clk:
+ clk_disable_unprepare(scaler->axis_clk);
+err_ctrl_clk:
+ clk_disable_unprepare(scaler->ctrl_clk);
+ return ret;
+}
+
+static int xilinx_scaler_remove(struct platform_device *pdev)
+{
+ struct xilinx_scaler *scaler = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&scaler->bridge);
+ clk_disable_unprepare(scaler->axis_clk);
+ clk_disable_unprepare(scaler->ctrl_clk);
+ return 0;
+}
+
+static const struct of_device_id xilinx_scaler_of_match[] = {
+ { .compatible = "xlnx,vpss-scaler"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xilinx_scaler_of_match);
+
+static struct platform_driver scaler_bridge_driver = {
+ .probe = xilinx_scaler_probe,
+ .remove = xilinx_scaler_remove,
+ .driver = {
+ .name = "xlnx,scaler-bridge",
+ .of_match_table = xilinx_scaler_of_match,
+ },
+};
+
+module_platform_driver(scaler_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SCALER Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi.c b/drivers/gpu/drm/xlnx/xlnx_sdi.c
new file mode 100644
index 000000000000..9fb7b5db5589
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx Subsystem driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_modes.h"
+#include "xlnx_sdi_timing.h"
+
+#include "xlnx_bridge.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+#define XSDI_TX_ST352_DATA_DS2 0x70
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_M BIT(7)
+#define XSDI_TX_CTRL_INS_CRC BIT(12)
+#define XSDI_TX_CTRL_INS_ST352 BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352 BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT BIT(16)
+#define XSDI_TX_CTRL_USE_ANC_IN BIT(18)
+#define XSDI_TX_CTRL_INS_LN BIT(19)
+#define XSDI_TX_CTRL_INS_EDH BIT(20)
+#define XSDI_TX_CTRL_MODE 0x7
+#define XSDI_TX_CTRL_MUX 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_420_BIT BIT(21)
+#define XSDI_TX_CTRL_INS_ST352_CHROMA BIT(23)
+#define XSDI_TX_CTRL_USE_DS2_3GA BIT(24)
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR BIT(8)
+#define XSDI_OVERFLOW_INTR BIT(9)
+#define XSDI_UNDERFLOW_INTR BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR | \
+ XSDI_TX_CE_ALIGN_ERR_INTR | \
+ XSDI_OVERFLOW_INTR | \
+ XSDI_UNDERFLOW_INTR)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_CTRL_EN BIT(0)
+#define XSDI_TX_BRIDGE_CTRL_EN BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN BIT(9)
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_GT_RESETDONE BIT(2)
+
+#define XSDI_TX_MUX_SD_HD_3GA 0
+#define XSDI_TX_MUX_3GB 1
+#define XSDI_TX_MUX_8STREAM_6G_12G 2
+#define XSDI_TX_MUX_4STREAM_6G 3
+#define XSDI_TX_MUX_16STREAM_12G 4
+
+#define SDI_MAX_DATASTREAM 8
+#define PIXELS_PER_CLK 2
+#define XSDI_CH_SHIFT 29
+#define XST352_PROG_PIC BIT(6)
+#define XST352_PROG_TRANS BIT(7)
+#define XST352_2048_SHIFT BIT(6)
+#define XST352_YUV420_MASK 0x03
+#define ST352_BYTE3 0x00
+#define ST352_BYTE4 0x01
+#define GT_TIMEOUT 50
+/* SDI modes */
+#define XSDI_MODE_HD 0
+#define XSDI_MODE_SD 1
+#define XSDI_MODE_3GA 2
+#define XSDI_MODE_3GB 3
+#define XSDI_MODE_6G 4
+#define XSDI_MODE_12G 5
+
+#define SDI_TIMING_PARAMS_SIZE 48
+
+/**
+ * enum payload_line_1 - Payload Ids Line 1 number
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD,3G,6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ */
+enum payload_line_1 {
+ PAYLD_LN1_HD_3_6_12G = 10,
+ PAYLD_LN1_SDPAL = 9,
+ PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - Payload Ids Line 2 number
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD,3G,6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ */
+enum payload_line_2 {
+ PAYLD_LN2_HD_3_6_12G = 572,
+ PAYLD_LN2_SDPAL = 322,
+ PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * struct xlnx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @enable_st352_chroma: Whether ST352 packets can be sent in the chroma stream.
+ * @enable_anc_data: Enable/Disable Ancillary Data insertion for Audio
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ * 0 - HD
+ * 1 - SD
+ * 2 - 3GA
+ * 3 - 3GB
+ * 4 - 6G
+ * 5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams
+ * value currently supported are 2, 4 and 8
+ * @sdi_420_in: Specifying input bus color format parameter to SDI
+ * @sdi_420_in_val: 1 for yuv420 and 0 for yuv422
+ * @sdi_420_out: configurable SDI out color format parameter
+ * @sdi_420_out_val: 1 for yuv420 and 0 for yuv422
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @en_st352_c_prop: configurable ST352 payload on Chroma stream parameter
+ * @en_st352_c_val: configurable ST352 payload on Chroma parameter value
+ * @use_ds2_3ga_prop: Use DS2 instead of DS3 in 3GA mode parameter
+ * @use_ds2_3ga_val: Use DS2 instead of DS3 in 3GA mode parameter value
+ * @video_mode: current display mode
+ * @axi_clk: AXI Lite interface clock
+ * @sditx_clk: SDI Tx Clock
+ * @vidin_clk: Video Clock
+ */
+struct xlnx_sdi {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct device *dev;
+ void __iomem *base;
+ u32 mode_flags;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ bool enable_st352_chroma;
+ bool enable_anc_data;
+ struct drm_property *sdi_mode;
+ u32 sdi_mod_prop_val;
+ struct drm_property *sdi_data_strm;
+ u32 sdi_data_strm_prop_val;
+ struct drm_property *sdi_420_in;
+ bool sdi_420_in_val;
+ struct drm_property *sdi_420_out;
+ bool sdi_420_out_val;
+ struct drm_property *is_frac_prop;
+ bool is_frac_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ struct drm_property *en_st352_c_prop;
+ bool en_st352_c_val;
+ struct drm_property *use_ds2_3ga_prop;
+ bool use_ds2_3ga_val;
+ struct drm_display_mode video_mode;
+ struct clk *axi_clk;
+ struct clk *sditx_clk;
+ struct clk *vidin_clk;
+};
+
+#define connector_to_sdi(c) container_of(c, struct xlnx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xlnx_sdi, encoder)
+
+static inline void xlnx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_sdi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_sdi_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx AXI4S-to-Video core.
+ */
+static void xlnx_sdi_en_axi4s(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_AXI4S_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx bridge.
+ */
+static void xlnx_sdi_en_bridge(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_BRIDGE_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_irq_handler - SDI Tx interrupt
+ * @irq: irq number
+ * @data: irq data
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ * This handler services the SDI Tx interrupt sources and signals
+ * completion of the GT (gigabit transceiver) reset.
+ */
+static irqreturn_t xlnx_sdi_irq_handler(int irq, void *data)
+{
+ struct xlnx_sdi *sdi = (struct xlnx_sdi *)data;
+ u32 reg;
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+ if (reg & XSDI_GTTX_RSTDONE_INTR)
+ dev_dbg(sdi->dev, "GT reset interrupt received\n");
+ if (reg & XSDI_TX_CE_ALIGN_ERR_INTR)
+ dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+ if (reg & XSDI_OVERFLOW_INTR)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+ if (reg & XSDI_UNDERFLOW_INTR)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+ xlnx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+ reg & ~(XSDI_AXI4S_VID_LOCK_INTR));
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+ if (reg & XSDI_TX_TDATA_GT_RESETDONE) {
+ sdi->event_received = true;
+ wake_up_interruptible(&sdi->wait_event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xlnx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * This function sets the ST352 packet line numbers.
+ */
+static void xlnx_sdi_set_payload_line(struct xlnx_sdi *sdi,
+ u32 line_1, u32 line_2)
+{
+ u32 data;
+
+ data = ((line_1 & XSDI_TX_ST352_LINE_MASK) |
+ ((line_2 & XSDI_TX_ST352_LINE_MASK) <<
+ XSDI_TX_ST352_LINE_F2_SHIFT));
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, data);
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data |= (1 << XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * This function sets the ST352 payload data for the corresponding stream.
+ */
+static void xlnx_sdi_set_payload_data(struct xlnx_sdi *sdi,
+ u32 data_strm, u32 payload)
+{
+ xlnx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_CH0 + (data_strm * 4)), payload);
+
+ dev_dbg(sdi->dev, "enable_st352_chroma = %d and en_st352_c_val = %d\n",
+ sdi->enable_st352_chroma, sdi->en_st352_c_val);
+ if (sdi->enable_st352_chroma && sdi->en_st352_c_val) {
+ xlnx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_DS2 + (data_strm * 4)),
+ payload);
+ }
+}
+
+/**
+ * xlnx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and disables the core enable bit
+ * of the core configuration register.
+ */
+static void xlnx_sdi_set_display_disable(struct xlnx_sdi *sdi)
+{
+ u32 i;
+
+ for (i = 0; i < SDI_MAX_DATASTREAM; i++)
+ xlnx_sdi_set_payload_data(sdi, i, 0);
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xlnx_sdi_payload_config - configure the SDI payload parameters
+ * @sdi: pointer to Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * This function configures the SDI ST352 payload parameters.
+ */
+static void xlnx_sdi_payload_config(struct xlnx_sdi *sdi, u32 mode)
+{
+ u32 payload_1, payload_2;
+
+ switch (mode) {
+ case XSDI_MODE_SD:
+ payload_1 = PAYLD_LN1_SDPAL;
+ payload_2 = PAYLD_LN2_SDPAL;
+ break;
+ case XSDI_MODE_HD:
+ case XSDI_MODE_3GA:
+ case XSDI_MODE_3GB:
+ case XSDI_MODE_6G:
+ case XSDI_MODE_12G:
+ payload_1 = PAYLD_LN1_HD_3_6_12G;
+ payload_2 = PAYLD_LN2_HD_3_6_12G;
+ break;
+ default:
+ payload_1 = 0;
+ payload_2 = 0;
+ break;
+ }
+
+ xlnx_sdi_set_payload_line(sdi, payload_1, payload_2);
+}
+
+/**
+ * xlnx_sdi_set_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer to Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer, 1 - fractional frame rate
+ * @mux_ptrn: specify the data stream interleaving pattern to be used
+ *
+ * This function configures the SDI mode, fractional rate flag and data
+ * stream interleaving (mux) pattern in the module control register.
+ */
+static void xlnx_sdi_set_mode(struct xlnx_sdi *sdi, u32 mode,
+ bool is_frac, u32 mux_ptrn)
+{
+ u32 data;
+
+ xlnx_sdi_payload_config(sdi, mode);
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data &= ~(XSDI_TX_CTRL_MODE << XSDI_TX_CTRL_MODE_SHIFT);
+ data &= ~(XSDI_TX_CTRL_M);
+ data &= ~(XSDI_TX_CTRL_MUX << XSDI_TX_CTRL_MUX_SHIFT);
+ data &= ~XSDI_TX_CTRL_420_BIT;
+
+ data |= (((mode & XSDI_TX_CTRL_MODE) << XSDI_TX_CTRL_MODE_SHIFT) |
+ (is_frac << XSDI_TX_CTRL_M_SHIFT) |
+ ((mux_ptrn & XSDI_TX_CTRL_MUX) << XSDI_TX_CTRL_MUX_SHIFT));
+
+ if (sdi->sdi_420_out_val)
+ data |= XSDI_TX_CTRL_420_BIT;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_config_parameters - Configure SDI Tx registers with parameters
+ * supplied by the user application.
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure holding the drm_property parameters
+ * configured from the user application and writes them into the SDI IP
+ * registers.
+ */
+static void xlnx_sdi_set_config_parameters(struct xlnx_sdi *sdi)
+{
+ int mux_ptrn = -EINVAL;
+
+ switch (sdi->sdi_mod_prop_val) {
+ case XSDI_MODE_3GA:
+ mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+ break;
+ case XSDI_MODE_3GB:
+ mux_ptrn = XSDI_TX_MUX_3GB;
+ break;
+ case XSDI_MODE_6G:
+ if (sdi->sdi_data_strm_prop_val == 4)
+ mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+ else if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ case XSDI_MODE_12G:
+ if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ default:
+ mux_ptrn = 0;
+ break;
+ }
+ if (mux_ptrn == -EINVAL) {
+ dev_err(sdi->dev, "%d data stream not supported for %d mode",
+ sdi->sdi_data_strm_prop_val, sdi->sdi_mod_prop_val);
+ return;
+ }
+ xlnx_sdi_set_mode(sdi, sdi->sdi_mod_prop_val, sdi->is_frac_prop_val,
+ mux_ptrn);
+}
+
+/**
+ * xlnx_sdi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer to Xilinx SDI connector
+ * @state: DRM connector state
+ * @property: pointer to the drm_property structure
+ * @val: SDI parameter value that is configured from the user application
+ *
+ * This function takes a drm_property name and value given from the user
+ * application and updates the SDI structure property variables with the
+ * values. These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xlnx_sdi_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, uint64_t val)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+ if (property == sdi->sdi_mode)
+ sdi->sdi_mod_prop_val = (unsigned int)val;
+ else if (property == sdi->sdi_data_strm)
+ sdi->sdi_data_strm_prop_val = (unsigned int)val;
+ else if (property == sdi->sdi_420_in)
+ sdi->sdi_420_in_val = val;
+ else if (property == sdi->sdi_420_out)
+ sdi->sdi_420_out_val = val;
+ else if (property == sdi->is_frac_prop)
+ sdi->is_frac_prop_val = !!val;
+ else if (property == sdi->height_out)
+ sdi->height_out_prop_val = (unsigned int)val;
+ else if (property == sdi->width_out)
+ sdi->width_out_prop_val = (unsigned int)val;
+ else if (property == sdi->in_fmt)
+ sdi->in_fmt_prop_val = (unsigned int)val;
+ else if (property == sdi->out_fmt)
+ sdi->out_fmt_prop_val = (unsigned int)val;
+ else if (property == sdi->en_st352_c_prop)
+ sdi->en_st352_c_val = !!val;
+ else if (property == sdi->use_ds2_3ga_prop)
+ sdi->use_ds2_3ga_val = !!val;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+static int
+xlnx_sdi_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+ if (property == sdi->sdi_mode)
+ *val = sdi->sdi_mod_prop_val;
+ else if (property == sdi->sdi_data_strm)
+ *val = sdi->sdi_data_strm_prop_val;
+ else if (property == sdi->sdi_420_in)
+ *val = sdi->sdi_420_in_val;
+ else if (property == sdi->sdi_420_out)
+ *val = sdi->sdi_420_out_val;
+ else if (property == sdi->is_frac_prop)
+ *val = sdi->is_frac_prop_val;
+ else if (property == sdi->height_out)
+ *val = sdi->height_out_prop_val;
+ else if (property == sdi->width_out)
+ *val = sdi->width_out_prop_val;
+ else if (property == sdi->in_fmt)
+ *val = sdi->in_fmt_prop_val;
+ else if (property == sdi->out_fmt)
+ *val = sdi->out_fmt_prop_val;
+ else if (property == sdi->en_st352_c_prop)
+ *val = sdi->en_st352_c_val;
+ else if (property == sdi->use_ds2_3ga_prop)
+ *val = sdi->use_ds2_3ga_val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: mode id if mode is found OR -EINVAL otherwise
+ */
+static int xlnx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+ if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+ return i;
+ return -EINVAL;
+}
+
+/**
+ * xlnx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer to Xilinx SDI connector
+ *
+ * Return: Count of modes added
+ *
+ * This function adds the supported SDI modes and returns their count.
+ */
+static int xlnx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+ int num_modes = 0;
+ u32 i;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ const struct drm_display_mode *ptr = &xlnx_sdi_modes[i].mode;
+
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+
+static enum drm_connector_status
+xlnx_sdi_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void xlnx_sdi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_sdi_connector_funcs = {
+ .detect = xlnx_sdi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_sdi_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = xlnx_sdi_atomic_set_property,
+ .atomic_get_property = xlnx_sdi_atomic_get_property,
+};
+
+static struct drm_encoder *
+xlnx_sdi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_sdi(connector)->encoder);
+}
+
+static int xlnx_sdi_get_modes(struct drm_connector *connector)
+{
+ return xlnx_sdi_drm_add_modes(connector);
+}
+
+static struct drm_connector_helper_funcs xlnx_sdi_connector_helper_funcs = {
+ .get_modes = xlnx_sdi_get_modes,
+ .best_encoder = xlnx_sdi_best_encoder,
+};
+
+/**
+ * xlnx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the Xilinx SDI connector component and defines
+ * the drm_property variables with their default values.
+ */
+static void
+xlnx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 0, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+ sdi->sdi_420_in = drm_property_create_bool(dev, 0, "sdi_420_in");
+ sdi->sdi_420_out = drm_property_create_bool(dev, 0, "sdi_420_out");
+ sdi->height_out = drm_property_create_range(dev, 0,
+ "height_out", 2, 4096);
+ sdi->width_out = drm_property_create_range(dev, 0,
+ "width_out", 2, 4096);
+ sdi->in_fmt = drm_property_create_range(dev, 0,
+ "in_fmt", 0, 16384);
+ sdi->out_fmt = drm_property_create_range(dev, 0,
+ "out_fmt", 0, 16384);
+ if (sdi->enable_st352_chroma) {
+ sdi->en_st352_c_prop = drm_property_create_bool(dev, 0,
+ "en_st352_c");
+ sdi->use_ds2_3ga_prop = drm_property_create_bool(dev, 0,
+ "use_ds2_3ga");
+ }
+}
+
+/**
+ * xlnx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ */
+static void
+xlnx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (sdi->sdi_mode)
+ drm_object_attach_property(obj, sdi->sdi_mode, 0);
+
+ if (sdi->sdi_data_strm)
+ drm_object_attach_property(obj, sdi->sdi_data_strm, 0);
+
+ if (sdi->sdi_420_in)
+ drm_object_attach_property(obj, sdi->sdi_420_in, 0);
+
+ if (sdi->sdi_420_out)
+ drm_object_attach_property(obj, sdi->sdi_420_out, 0);
+
+ if (sdi->is_frac_prop)
+ drm_object_attach_property(obj, sdi->is_frac_prop, 0);
+
+ if (sdi->height_out)
+ drm_object_attach_property(obj, sdi->height_out, 0);
+
+ if (sdi->width_out)
+ drm_object_attach_property(obj, sdi->width_out, 0);
+
+ if (sdi->in_fmt)
+ drm_object_attach_property(obj, sdi->in_fmt, 0);
+
+ if (sdi->out_fmt)
+ drm_object_attach_property(obj, sdi->out_fmt, 0);
+
+ if (sdi->en_st352_c_prop)
+ drm_object_attach_property(obj, sdi->en_st352_c_prop, 0);
+
+ if (sdi->use_ds2_3ga_prop)
+ drm_object_attach_property(obj, sdi->use_ds2_3ga_prop, 0);
+}
+
+static int xlnx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_sdi_drm_connector_create_property(connector);
+ xlnx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_set_display_enable - Enables the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and enables the core enable bit
+ * of the core configuration register.
+ */
+static void xlnx_sdi_set_display_enable(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the ST352 payload to be configured.
+ * Refer to the SMPTE ST 352 standard for details.
+ *
+ * Return: the ST352 payload value
+ */
+static u32 xlnx_sdi_calc_st352_payld(struct xlnx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+ int id;
+ u32 sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ id = xlnx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ if (sdi->sdi_420_in_val)
+ byt3 |= XST352_YUV420_MASK;
+
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if (sdi_mode == XSDI_MODE_3GB ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC;
+ if (is_p && mode->vtotal >= 1125)
+ byt2 |= XST352_PROG_TRANS;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
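+ /*
+ * Assemble the payload word per SMPTE ST 352: byte 1 (video standard)
+ * in bits 7:0, byte 2 (transport rate/structure) in bits 15:8, byte 3
+ * (sampling) in bits 23:16 and byte 4 in bits 31:24.
+ */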
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
+
+static void xlnx_sdi_setup(struct xlnx_sdi *sdi)
+{
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC | XSDI_TX_CTRL_INS_ST352 |
+ XSDI_TX_CTRL_OVR_ST352 | XSDI_TX_CTRL_INS_SYNC_BIT |
+ XSDI_TX_CTRL_INS_EDH;
+
+ if (sdi->enable_anc_data)
+ reg |= XSDI_TX_CTRL_USE_ANC_IN;
+
+ if (sdi->enable_st352_chroma) {
+ if (sdi->en_st352_c_val) {
+ reg |= XSDI_TX_CTRL_INS_ST352_CHROMA;
+ if (sdi->use_ds2_3ga_val)
+ reg |= XSDI_TX_CTRL_USE_DS2_3GA;
+ else
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ } else {
+ reg &= ~XSDI_TX_CTRL_INS_ST352_CHROMA;
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ }
+ }
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xlnx_stc_reset(sdi->base);
+}
+
+/**
+ * xlnx_sdi_encoder_atomic_mode_set - drive the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: DRM crtc state
+ * @connector_state: DRM connector state
+ *
+ * This function derives the SDI IP timing parameters from the adjusted
+ * display mode and programs them into the timing module.
+ */
+static void xlnx_sdi_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct videomode vm;
+ u32 payload, i;
+ u32 sditx_blank, vtc_blank;
+
+ /* Set timing parameters as per bridge output parameters */
+ xlnx_bridge_set_input(sdi->bridge, adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay, sdi->in_fmt_prop_val);
+ xlnx_bridge_set_output(sdi->bridge, sdi->width_out_prop_val,
+ sdi->height_out_prop_val, sdi->out_fmt_prop_val);
+ xlnx_bridge_enable(sdi->bridge);
+
+ if (sdi->bridge) {
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ if (xlnx_sdi_modes[i].mode.hdisplay ==
+ sdi->width_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vdisplay ==
+ sdi->height_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vrefresh ==
+ adjusted_mode->vrefresh) {
+ memcpy((char *)adjusted_mode +
+ offsetof(struct drm_display_mode,
+ clock),
+ &xlnx_sdi_modes[i].mode.clock,
+ SDI_TIMING_PARAMS_SIZE);
+ break;
+ }
+ }
+ }
+
+ xlnx_sdi_setup(sdi);
+ xlnx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xlnx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ payload |= (i << 1) << XSDI_CH_SHIFT;
+ xlnx_sdi_set_payload_data(sdi, i, payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
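+ /*
+ * The VTC horizontal blanking (front porch + sync + back porch, in
+ * pixels) must match the SDI Tx blanking exactly; the integer division
+ * by PIXELS_PER_CLK above can drop a pixel, so widen the front porch
+ * until the VTC total catches up with the SDI one.
+ */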
+ do {
+ sditx_blank = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) +
+ (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) +
+ (adjusted_mode->htotal -
+ adjusted_mode->hsync_end);
+
+ vtc_blank = (vm.hfront_porch + vm.hback_porch +
+ vm.hsync_len) * PIXELS_PER_CLK;
+
+ if (vtc_blank != sditx_blank)
+ vm.hfront_porch++;
+ } while (vtc_blank < sditx_blank);
+
+ vm.pixelclock = adjusted_mode->clock * 1000;
+
+ /* parameters for sdi audio */
+ sdi->video_mode.vdisplay = adjusted_mode->vdisplay;
+ sdi->video_mode.hdisplay = adjusted_mode->hdisplay;
+ sdi->video_mode.vrefresh = adjusted_mode->vrefresh;
+ sdi->video_mode.flags = adjusted_mode->flags;
+
+ xlnx_stc_sig(sdi->base, &vm);
+}
+
+static void xlnx_sdi_commit(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ long ret;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+ xlnx_sdi_set_display_enable(sdi);
+ ret = wait_event_interruptible_timeout(sdi->wait_event,
+ sdi->event_received,
+ usecs_to_jiffies(GT_TIMEOUT));
+ if (!ret) {
+ dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+ return;
+ }
+ sdi->event_received = false;
+ /* enable sdi bridge, timing controller and Axi4s_vid_out_ctrl */
+ xlnx_sdi_en_bridge(sdi);
+ xlnx_stc_enable(sdi->base);
+ xlnx_sdi_en_axi4s(sdi);
+}
+
+static void xlnx_sdi_disable(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+
+ if (sdi->bridge)
+ xlnx_bridge_disable(sdi->bridge);
+
+ xlnx_sdi_set_display_disable(sdi);
+ xlnx_stc_disable(sdi->base);
+}
+
+static const struct drm_encoder_helper_funcs xlnx_sdi_encoder_helper_funcs = {
+ .atomic_mode_set = xlnx_sdi_encoder_atomic_mode_set,
+ .enable = xlnx_sdi_commit,
+ .disable = xlnx_sdi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_sdi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int xlnx_sdi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &sdi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: Only one CRTC is supported in the current SDI Tx driver
+ * implementation. The DRM framework supports multiple CRTCs and the
+ * driver can be enhanced to use them.
+ */
+ encoder->possible_crtcs = 1;
+
+ drm_encoder_init(drm_dev, encoder, &xlnx_sdi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ drm_encoder_helper_add(encoder, &xlnx_sdi_encoder_helper_funcs);
+
+ ret = xlnx_sdi_create_connector(encoder);
+ if (ret) {
+ dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ }
+ return ret;
+}
+
+static void xlnx_sdi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+
+ xlnx_sdi_set_display_disable(sdi);
+ xlnx_stc_disable(sdi->base);
+ drm_encoder_cleanup(&sdi->encoder);
+ drm_connector_cleanup(&sdi->connector);
+ xlnx_bridge_disable(sdi->bridge);
+}
+
+static const struct component_ops xlnx_sdi_component_ops = {
+ .bind = xlnx_sdi_bind,
+ .unbind = xlnx_sdi_unbind,
+};
+
+static int xlnx_sdi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xlnx_sdi *sdi;
+ struct device_node *vpss_node;
+ int ret, irq;
+ struct device_node *ports, *port;
+ u32 nports = 0, portmask = 0;
+
+ sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
+ sdi->dev = dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sdi->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(sdi->base);
+ }
+ platform_set_drvdata(pdev, sdi);
+
+ sdi->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+ if (IS_ERR(sdi->axi_clk)) {
+ ret = PTR_ERR(sdi->axi_clk);
+ dev_err(dev, "failed to get s_axi_aclk %d\n", ret);
+ return ret;
+ }
+
+ sdi->sditx_clk = devm_clk_get(dev, "sdi_tx_clk");
+ if (IS_ERR(sdi->sditx_clk)) {
+ ret = PTR_ERR(sdi->sditx_clk);
+ dev_err(dev, "failed to get sdi_tx_clk %d\n", ret);
+ return ret;
+ }
+
+ sdi->vidin_clk = devm_clk_get(dev, "video_in_clk");
+ if (IS_ERR(sdi->vidin_clk)) {
+ ret = PTR_ERR(sdi->vidin_clk);
+ dev_err(dev, "failed to get video_in_clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdi->axi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable axi_clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdi->sditx_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable sditx_clk %d\n", ret);
+ goto err_disable_axi_clk;
+ }
+
+ ret = clk_prepare_enable(sdi->vidin_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable vidin_clk %d\n", ret);
+ goto err_disable_sditx_clk;
+ }
+
+ /* in case all "port" nodes are grouped under a "ports" node */
+ ports = of_get_child_by_name(sdi->dev->of_node, "ports");
+ if (!ports) {
+ dev_dbg(dev, "Searching for port nodes in device node.\n");
+ ports = sdi->dev->of_node;
+ }
+
+ for_each_child_of_node(ports, port) {
+ struct device_node *endpoint;
+ u32 index;
+
+ if (!port->name || of_node_cmp(port->name, "port")) {
+ dev_dbg(dev, "port name is null or node name is not port!\n");
+ continue;
+ }
+
+ endpoint = of_get_next_child(port, NULL);
+		if (!endpoint) {
+			dev_err(dev, "No remote port at %s\n", port->name);
+			ret = -EINVAL;
+			goto err_disable_vidin_clk;
+		}
+
+ of_node_put(endpoint);
+
+ ret = of_property_read_u32(port, "reg", &index);
+ if (ret) {
+ dev_err(dev, "reg property not present - %d\n", ret);
+ goto err_disable_vidin_clk;
+ }
+
+ portmask |= (1 << index);
+
+ nports++;
+ }
+
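+	/* a single port carries video only; a second port adds ancillary data */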
+ if (nports == 2 && portmask & 0x3) {
+ dev_dbg(dev, "enable ancillary port\n");
+ sdi->enable_anc_data = true;
+ } else if (nports == 1 && portmask & 0x1) {
+ dev_dbg(dev, "no ancillary port\n");
+ sdi->enable_anc_data = false;
+ } else {
+ dev_err(dev, "Incorrect dt node!\n");
+ ret = -EINVAL;
+ goto err_disable_vidin_clk;
+ }
+
+ sdi->enable_st352_chroma = of_property_read_bool(sdi->dev->of_node,
+ "xlnx,tx-insert-c-str-st352");
+
+ /* disable interrupt */
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_disable_vidin_clk;
+ }
+
+ ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+ xlnx_sdi_irq_handler, IRQF_ONESHOT,
+ dev_name(sdi->dev), sdi);
+ if (ret < 0)
+ goto err_disable_vidin_clk;
+
+ /* initialize the wait queue for GT reset event */
+ init_waitqueue_head(&sdi->wait_event);
+
+ /* Bridge support */
+ vpss_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vpss", 0);
+ if (vpss_node) {
+ sdi->bridge = of_xlnx_bridge_get(vpss_node);
+ if (!sdi->bridge) {
+ dev_info(sdi->dev, "Didn't get bridge instance\n");
+ ret = -EPROBE_DEFER;
+ goto err_disable_vidin_clk;
+ }
+ }
+
+	/*
+	 * Video mode properties needed by the audio driver are shared with it
+	 * through a pointer in the platform data. This approach may need to be
+	 * modified or extended to avoid possible error scenarios.
+	 */
+ pdev->dev.platform_data = &sdi->video_mode;
+
+ ret = component_add(dev, &xlnx_sdi_component_ops);
+ if (ret < 0)
+ goto err_disable_vidin_clk;
+
+ return ret;
+
+err_disable_vidin_clk:
+ clk_disable_unprepare(sdi->vidin_clk);
+err_disable_sditx_clk:
+ clk_disable_unprepare(sdi->sditx_clk);
+err_disable_axi_clk:
+ clk_disable_unprepare(sdi->axi_clk);
+
+ return ret;
+}
+
+static int xlnx_sdi_remove(struct platform_device *pdev)
+{
+ struct xlnx_sdi *sdi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &xlnx_sdi_component_ops);
+ clk_disable_unprepare(sdi->vidin_clk);
+ clk_disable_unprepare(sdi->sditx_clk);
+ clk_disable_unprepare(sdi->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_sdi_of_match[] = {
+ { .compatible = "xlnx,sdi-tx"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_of_match);
+
+static struct platform_driver sdi_tx_driver = {
+ .probe = xlnx_sdi_probe,
+ .remove = xlnx_sdi_remove,
+ .driver = {
+ .name = "xlnx-sdi-tx",
+ .of_match_table = xlnx_sdi_of_match,
+ },
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
new file mode 100644
index 000000000000..534f7d80f29c
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI modes timing values for various
+ * resolutions
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_MODES_H_
+#define _XLNX_SDI_MODES_H_
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ * index 0 : value for integral fps
+ * index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ * index 0 : value for HD mode
+ * index 1 : value for SD mode
+ * index 2 : value for 3GA
+ * index 3 : value for 3GB
+ * index 4 : value for 6G
+ * index 5 : value for 12G
+ */
+struct xlnx_sdi_display_config {
+ struct drm_display_mode mode;
+ u8 st352_byt2[2];
+ u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - SDI DRM modes
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x486i@60Hz */
+ {{ DRM_MODE("720x486i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 243, 247, 250, 262, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+#endif /* _XLNX_SDI_MODES_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
new file mode 100644
index 000000000000..61ee98e87fdc
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drmP.h>
+#include <linux/device.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_timing.h"
+
+/* timing controller register offsets */
+#define XSTC_CTL 0x00
+#define XSTC_STATS 0x04
+#define XSTC_ERROR 0x08
+#define XSTC_GASIZE 0x60
+#define XSTC_GENC 0x68
+#define XSTC_GPOL 0x6c
+#define XSTC_GHSIZE 0x70
+#define XSTC_GVSIZE 0x74
+#define XSTC_GHSYNC 0x78
+#define XSTC_GVBH_F0 0x7c
+#define XSTC_GVSYNC_F0 0x80
+#define XSTC_GVSH_F0 0x84
+#define XSTC_GVBH_F1 0x88
+#define XSTC_GVSYNC_F1 0x8C
+#define XSTC_GVSH_F1 0x90
+#define XSTC_GASIZE_F1 0x94
+#define XSTC_OFFSET 0x10000
+
+/* timing controller register bits */
+#define XSTC_CTL_FIP BIT(6) /* field id polarity */
+#define XSTC_CTL_ACP BIT(5) /* active chroma polarity */
+#define XSTC_CTL_AVP BIT(4) /* active video polarity */
+#define XSTC_CTL_HSP BIT(3) /* hori sync polarity */
+#define XSTC_CTL_VSP BIT(2) /* vert sync polarity */
+#define XSTC_CTL_HBP BIT(1) /* hori blank polarity */
+#define XSTC_CTL_VBP BIT(0) /* vert blank polarity */
+#define XSTC_CTL_FIPSS BIT(26) /* field id polarity source */
+#define XSTC_CTL_ACPSS BIT(25) /* active chroma polarity src */
+#define XSTC_CTL_AVPSS BIT(24) /* active video polarity src */
+#define XSTC_CTL_HSPSS BIT(23) /* hori sync polarity src */
+#define XSTC_CTL_VSPSS BIT(22) /* vert sync polarity src */
+#define XSTC_CTL_HBPSS BIT(21) /* hori blank polarity src */
+#define XSTC_CTL_VBPSS BIT(20) /* vert blank polarity src */
+#define XSTC_CTL_VCSS BIT(18) /* chroma src */
+#define XSTC_CTL_VASS BIT(17) /* vertical offset src */
+#define XSTC_CTL_VBSS BIT(16) /* vertical sync end src */
+#define XSTC_CTL_VSSS BIT(15) /* vertical sync start src */
+#define XSTC_CTL_VFSS BIT(14) /* vertical active size src */
+#define XSTC_CTL_VTSS BIT(13) /* vertical frame size src */
+#define XSTC_CTL_HBSS BIT(11) /* horiz sync end src */
+#define XSTC_CTL_HSSS BIT(10) /* horiz sync start src */
+#define XSTC_CTL_HFSS BIT(9) /* horiz active size src */
+#define XSTC_CTL_HTSS BIT(8) /* horiz frame size src */
+#define XSTC_CTL_GE BIT(2) /* timing generator enable */
+#define XSTC_CTL_RU BIT(1) /* timing register update */
+
+/* timing generator horizontal 1 */
+#define XSTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GH1_BPSTART_SHIFT 16
+#define XSTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator vertical 1 (field 0) */
+#define XSTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GV1_BPSTART_SHIFT 16
+#define XSTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator/detector vblank/vsync horizontal offset registers */
+#define XSTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XSTC_XVXHOX_HEND_SHIFT 16
+#define XSTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XSTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XSTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XSTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+/* reset register bit definition */
+#define XSTC_RST BIT(31)
+/* Interlaced bit in XSTC_GENC */
+#define XSTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_stc_polarity - timing signal polarity
+ *
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ */
+struct xlnx_stc_polarity {
+ u8 field_id;
+ u8 vblank;
+ u8 vsync;
+ u8 hblank;
+ u8 hsync;
+};
+
+/**
+ * struct xlnx_stc_hori_off - timing signal horizontal offset
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ */
+struct xlnx_stc_hori_off {
+ u16 v0blank_hori_start;
+ u16 v0blank_hori_end;
+ u16 v0sync_hori_start;
+ u16 v0sync_hori_end;
+ u16 v1blank_hori_start;
+ u16 v1blank_hori_end;
+ u16 v1sync_hori_start;
+ u16 v1sync_hori_end;
+};
+
+/**
+ * xlnx_stc_writel - Memory mapped SDI Tx timing controller write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ * @val: value to be written
+ *
+ * This function writes the value to SDI TX timing controller registers
+ */
+static inline void xlnx_stc_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_readl - Memory mapped SDI Tx timing controller register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ *
+ * Return: The contents of the SDI Tx timing controller register
+ *
+ * This function returns the contents of the corresponding SDI Tx register.
+ */
+static inline u32 xlnx_stc_readl(void __iomem *base, int offset)
+{
+ return readl(base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_enable - Enable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function enables the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_enable(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_disable - Disable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function disables the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_disable(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_reset - Reset timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function resets the SDI Tx subsystem's timing controller
+ */
+void xlnx_stc_reset(void __iomem *base)
+{
+ u32 reg;
+
+ xlnx_stc_writel(base, XSTC_CTL, XSTC_RST);
+
+ /* enable register update */
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
+
+/**
+ * xlnx_stc_polarity - Configure timing signal polarity
+ * @base: Base address of SDI Tx subsystem
+ * @polarity: timing signal polarity data
+ *
+ * This function configures the timing signal polarity.
+ */
+static void xlnx_stc_polarity(void __iomem *base,
+ struct xlnx_stc_polarity *polarity)
+{
+ u32 reg = 0;
+
+ reg = XSTC_CTL_ACP;
+ reg |= XSTC_CTL_AVP;
+ if (polarity->field_id)
+ reg |= XSTC_CTL_FIP;
+ if (polarity->vblank)
+ reg |= XSTC_CTL_VBP;
+ if (polarity->vsync)
+ reg |= XSTC_CTL_VSP;
+ if (polarity->hblank)
+ reg |= XSTC_CTL_HBP;
+ if (polarity->hsync)
+ reg |= XSTC_CTL_HSP;
+
+ xlnx_stc_writel(base, XSTC_GPOL, reg);
+}
+
+/**
+ * xlnx_stc_hori_off - Configure horizontal timing offset
+ * @base: Base address of SDI Tx subsystem
+ * @hori_off: horizontal offset configuration data
+ * @flags: Display flags
+ *
+ * This function configures the horizontal timing offset.
+ */
+static void xlnx_stc_hori_off(void __iomem *base,
+ struct xlnx_stc_hori_off *hori_off,
+ enum display_flags flags)
+{
+ u32 reg;
+
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hori_off->v0blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v0blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVBH_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hori_off->v0sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v0sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVSH_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ if (flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hori_off->v1blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v1blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVBH_F1, reg);
+ }
+
+	/* Calculate and update Generator VSync Hori field 1 */
+ if (flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hori_off->v1sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+ reg |= (hori_off->v1sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+ XSTC_XVXHOX_HEND_MASK;
+ xlnx_stc_writel(base, XSTC_GVSH_F1, reg);
+ }
+}
+
+/**
+ * xlnx_stc_src - Configure timing source
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function configures the timing source.
+ */
+static void xlnx_stc_src(void __iomem *base)
+{
+ u32 reg;
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ reg |= XSTC_CTL_VCSS;
+ reg |= XSTC_CTL_VASS;
+ reg |= XSTC_CTL_VBSS;
+ reg |= XSTC_CTL_VSSS;
+ reg |= XSTC_CTL_VFSS;
+ reg |= XSTC_CTL_VTSS;
+ reg |= XSTC_CTL_HBSS;
+ reg |= XSTC_CTL_HSSS;
+ reg |= XSTC_CTL_HFSS;
+ reg |= XSTC_CTL_HTSS;
+ xlnx_stc_writel(base, XSTC_CTL, reg);
+}
+
+/**
+ * xlnx_stc_sig - Generates timing signal
+ * @base: Base address of SDI Tx subsystem
+ * @vm: video mode
+ *
+ * This function generates the timing for the given video mode.
+ */
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xlnx_stc_hori_off hori_off;
+ struct xlnx_stc_polarity polarity;
+
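+	/* hold off register updates while the timing values are programmed */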
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_RU);
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+ DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+ DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+ DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+ reg = htotal & XSTC_GHFRAME_HSIZE;
+ xlnx_stc_writel(base, XSTC_GHSIZE, reg);
+ reg = vtotal & XSTC_GVFRAME_HSIZE_F1;
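+	/*
+	 * For interlaced modes, program the field 1 vertical size one line
+	 * larger than field 0 (two lines for 3GB modes at a 148.5 MHz
+	 * pixel clock).
+	 */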
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vm->pixelclock == 148500000)
+ reg |= (reg + 2) <<
+ XSTC_GV1_BPSTART_SHIFT;
+ else
+ reg |= (reg + 1) <<
+ XSTC_GV1_BPSTART_SHIFT;
+ } else {
+ reg |= reg << XSTC_GV1_BPSTART_SHIFT;
+ }
+ xlnx_stc_writel(base, XSTC_GVSIZE, reg);
+ reg = hactive & XSTC_GA_ACTSIZE_MASK;
+ reg |= (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_stc_writel(base, XSTC_GASIZE, reg);
+
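+	/* for 720x486i (NTSC), field 1 carries one extra active line (244) */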
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vactive == 243)
+ reg = ((vactive + 1) & XSTC_GA_ACTSIZE_MASK) << 16;
+ else
+ reg = (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_stc_writel(base, XSTC_GASIZE_F1, reg);
+ }
+
+ reg = hsync_start & XSTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << XSTC_GH1_BPSTART_SHIFT) &
+ XSTC_GH1_BPSTART_MASK;
+ xlnx_stc_writel(base, XSTC_GHSYNC, reg);
+ reg = vsync_start & XSTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << XSTC_GV1_BPSTART_SHIFT) &
+ XSTC_GV1_BPSTART_MASK;
+
+ /*
+ * Fix the Vsync_vstart and vsync_vend of Field 0
+ * for all interlaced modes including 3GB.
+ */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) - 1) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) - 1);
+
+ xlnx_stc_writel(base, XSTC_GVSYNC_F0, reg);
+
+ /*
+ * Fix the Vsync_vstart and vsync_vend of Field 1
+ * for interlaced and 3GB modes.
+ */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ if (vm->pixelclock == 148500000)
+ /* Revert and increase by 1 for 3GB mode */
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) + 2) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) + 2);
+ else
+ /* Only revert the reduction */
+ reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+ XSTC_GV1_BPSTART_SHIFT) + 1) <<
+ XSTC_GV1_BPSTART_SHIFT) |
+ ((reg & XSTC_GV1_SYNCSTART_MASK) + 1);
+ }
+
+ hori_off.v0blank_hori_start = hactive;
+ hori_off.v0blank_hori_end = hactive;
+ hori_off.v0sync_hori_start = hsync_start;
+ hori_off.v0sync_hori_end = hsync_start;
+ hori_off.v1blank_hori_start = hactive;
+ hori_off.v1blank_hori_end = hactive;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ hori_off.v1sync_hori_start = hsync_start - (htotal / 2);
+ hori_off.v1sync_hori_end = hsync_start - (htotal / 2);
+ xlnx_stc_writel(base, XSTC_GVSYNC_F1, reg);
+ reg = xlnx_stc_readl(base, XSTC_GENC)
+ | XSTC_GENC_INTERL;
+ xlnx_stc_writel(base, XSTC_GENC, reg);
+ } else {
+ hori_off.v1sync_hori_start = hsync_start;
+ hori_off.v1sync_hori_end = hsync_start;
+ reg = xlnx_stc_readl(base, XSTC_GENC)
+ & ~XSTC_GENC_INTERL;
+ xlnx_stc_writel(base, XSTC_GENC, reg);
+ }
+
+ xlnx_stc_hori_off(base, &hori_off, vm->flags);
+ /* set up polarity */
+ memset(&polarity, 0x0, sizeof(polarity));
+ polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+ xlnx_stc_polarity(base, &polarity);
+
+ xlnx_stc_src(base);
+
+ reg = xlnx_stc_readl(base, XSTC_CTL);
+ xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
new file mode 100644
index 000000000000..4ca9f8972e0a
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_TIMING_H_
+#define _XLNX_SDI_TIMING_H_
+
+struct videomode;
+
+void xlnx_stc_enable(void __iomem *base);
+void xlnx_stc_disable(void __iomem *base);
+void xlnx_stc_reset(void __iomem *base);
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm);
+
+#endif /* _XLNX_SDI_TIMING_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_vtc.c b/drivers/gpu/drm/xlnx/xlnx_vtc.c
new file mode 100644
index 000000000000..427b35b84e16
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_vtc.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ * Saurabh Sengar <saurabhs@xilinx.com>
+ * Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver adds support to control the Xilinx Video Timing
+ * Controller connected to the CRTC.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+
+/* register offsets */
+#define XVTC_CTL 0x000
+#define XVTC_VER 0x010
+#define XVTC_GASIZE 0x060
+#define XVTC_GENC 0x068
+#define XVTC_GPOL 0x06c
+#define XVTC_GHSIZE 0x070
+#define XVTC_GVSIZE 0x074
+#define XVTC_GHSYNC 0x078
+#define XVTC_GVBHOFF_F0 0x07c
+#define XVTC_GVSYNC_F0 0x080
+#define XVTC_GVSHOFF_F0 0x084
+#define XVTC_GVBHOFF_F1 0x088
+#define XVTC_GVSYNC_F1 0x08C
+#define XVTC_GVSHOFF_F1 0x090
+#define XVTC_GASIZE_F1 0x094
+
+/* vtc control register bits */
+#define XVTC_CTL_SWRESET BIT(31)
+#define XVTC_CTL_FIPSS BIT(26)
+#define XVTC_CTL_ACPSS BIT(25)
+#define XVTC_CTL_AVPSS BIT(24)
+#define XVTC_CTL_HSPSS BIT(23)
+#define XVTC_CTL_VSPSS BIT(22)
+#define XVTC_CTL_HBPSS BIT(21)
+#define XVTC_CTL_VBPSS BIT(20)
+#define XVTC_CTL_VCSS BIT(18)
+#define XVTC_CTL_VASS BIT(17)
+#define XVTC_CTL_VBSS BIT(16)
+#define XVTC_CTL_VSSS BIT(15)
+#define XVTC_CTL_VFSS BIT(14)
+#define XVTC_CTL_VTSS BIT(13)
+#define XVTC_CTL_HBSS BIT(11)
+#define XVTC_CTL_HSSS BIT(10)
+#define XVTC_CTL_HFSS BIT(9)
+#define XVTC_CTL_HTSS BIT(8)
+#define XVTC_CTL_GE BIT(2)
+#define XVTC_CTL_RU BIT(1)
+
+/* vtc generator polarity register bits */
+#define XVTC_GPOL_FIP BIT(6)
+#define XVTC_GPOL_ACP BIT(5)
+#define XVTC_GPOL_AVP BIT(4)
+#define XVTC_GPOL_HSP BIT(3)
+#define XVTC_GPOL_VSP BIT(2)
+#define XVTC_GPOL_HBP BIT(1)
+#define XVTC_GPOL_VBP BIT(0)
+
+/* vtc generator horizontal 1 */
+#define XVTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GH1_BPSTART_SHIFT 16
+#define XVTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator vertical 1 (field 0) */
+#define XVTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GV1_BPSTART_SHIFT 16
+#define XVTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define XVTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XVTC_XVXHOX_HEND_SHIFT 16
+#define XVTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XVTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XVTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XVTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+
+/* vtc generator encoding register bits */
+#define XVTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_vtc - Xilinx VTC object
+ *
+ * @bridge: xilinx bridge structure
+ * @dev: device structure
+ * @base: base addr
+ * @ppc: pixels per clock
+ * @axi_clk: AXI Lite clock
+ * @vid_clk: Video clock
+ */
+struct xlnx_vtc {
+ struct xlnx_bridge bridge;
+ struct device *dev;
+ void __iomem *base;
+ u32 ppc;
+ struct clk *axi_clk;
+ struct clk *vid_clk;
+};
+
+static inline void xlnx_vtc_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_vtc_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static inline struct xlnx_vtc *bridge_to_vtc(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xlnx_vtc, bridge);
+}
+
+static void xlnx_vtc_reset(struct xlnx_vtc *vtc)
+{
+ u32 reg;
+
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, XVTC_CTL_SWRESET);
+
+ /* enable register update */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU);
+}
+
+/**
+ * xlnx_vtc_enable - Enable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function enables the VTC
+ */
+static int xlnx_vtc_enable(struct xlnx_bridge *bridge)
+{
+ u32 reg;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ /* enable generator */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_GE);
+ dev_dbg(vtc->dev, "enabled\n");
+ return 0;
+}
+
+/**
+ * xlnx_vtc_disable - Disable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * This function disables and resets the VTC.
+ */
+static void xlnx_vtc_disable(struct xlnx_bridge *bridge)
+{
+ u32 reg;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ /* disable generator and reset */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_GE);
+ xlnx_vtc_reset(vtc);
+ dev_dbg(vtc->dev, "disabled\n");
+}
+
+/**
+ * xlnx_vtc_set_timing - Configures the VTC
+ * @bridge: xilinx bridge structure pointer
+ * @vm: video mode requested
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function calculates the timing values from the video mode
+ * structure passed from the CRTC and configures the VTC.
+ */
+static int xlnx_vtc_set_timing(struct xlnx_bridge *bridge,
+ struct videomode *vm)
+{
+ u32 reg;
+ u32 htotal, hactive, hsync_start, hbackporch_start;
+ u32 vtotal, vactive, vsync_start, vbackporch_start;
+ struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_RU);
+
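+	/* convert horizontal timings from pixels to clocks (ppc pixels per clock) */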
+ vm->hactive /= vtc->ppc;
+ vm->hfront_porch /= vtc->ppc;
+ vm->hback_porch /= vtc->ppc;
+ vm->hsync_len /= vtc->ppc;
+
+ htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+ vm->hback_porch;
+ vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ hactive = vm->hactive;
+ vactive = vm->vactive;
+
+ hsync_start = vm->hactive + vm->hfront_porch;
+ vsync_start = vm->vactive + vm->vfront_porch;
+
+ hbackporch_start = hsync_start + vm->hsync_len;
+ vbackporch_start = vsync_start + vm->vsync_len;
+
+ dev_dbg(vtc->dev, "ha: %d, va: %d\n", hactive, vactive);
+ dev_dbg(vtc->dev, "ht: %d, vt: %d\n", htotal, vtotal);
+ dev_dbg(vtc->dev, "hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+ dev_dbg(vtc->dev, "vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+ reg = htotal & XVTC_GHFRAME_HSIZE;
+ xlnx_vtc_writel(vtc->base, XVTC_GHSIZE, reg);
+
+ reg = vtotal & XVTC_GVFRAME_HSIZE_F1;
+ reg |= reg << XVTC_GV1_BPSTART_SHIFT;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSIZE, reg);
+
+ reg = hactive & XVTC_GA_ACTSIZE_MASK;
+ reg |= (vactive & XVTC_GA_ACTSIZE_MASK) << 16;
+ xlnx_vtc_writel(vtc->base, XVTC_GASIZE, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ xlnx_vtc_writel(vtc->base, XVTC_GASIZE_F1, reg);
+
+ reg = hsync_start & XVTC_GH1_SYNCSTART_MASK;
+ reg |= (hbackporch_start << XVTC_GH1_BPSTART_SHIFT) &
+ XVTC_GH1_BPSTART_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GHSYNC, reg);
+
+ reg = vsync_start & XVTC_GV1_SYNCSTART_MASK;
+ reg |= (vbackporch_start << XVTC_GV1_BPSTART_SHIFT) &
+ XVTC_GV1_BPSTART_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F0, reg);
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F1, reg);
+ reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) | XVTC_GENC_INTERL;
+ xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+ } else {
+ reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) & ~XVTC_GENC_INTERL;
+ xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+ }
+
+ /* configure horizontal offset */
+ /* Calculate and update Generator VBlank Hori field 0 */
+ reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F0, reg);
+
+ /* Calculate and update Generator VSync Hori field 0 */
+ reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F0, reg);
+
+ /* Calculate and update Generator VBlank Hori field 1 */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F1, reg);
+ }
+
+	/* Calculate and update Generator VSync Hori field 1 */
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+ reg = (hsync_start - (htotal / 2)) & XVTC_XVXHOX_HSTART_MASK;
+ reg |= ((hsync_start - (htotal / 2)) <<
+ XVTC_XVXHOX_HEND_SHIFT) & XVTC_XVXHOX_HEND_MASK;
+ } else {
+ reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+ reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+ XVTC_XVXHOX_HEND_MASK;
+ }
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F1, reg);
+
+ /* configure polarity of signals */
+ reg = 0;
+ reg |= XVTC_GPOL_ACP;
+ reg |= XVTC_GPOL_AVP;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ reg |= XVTC_GPOL_FIP;
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH) {
+ reg |= XVTC_GPOL_VBP;
+ reg |= XVTC_GPOL_VSP;
+ }
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH) {
+ reg |= XVTC_GPOL_HBP;
+ reg |= XVTC_GPOL_HSP;
+ }
+ xlnx_vtc_writel(vtc->base, XVTC_GPOL, reg);
+
+ /* configure timing source */
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ reg |= XVTC_CTL_VCSS;
+ reg |= XVTC_CTL_VASS;
+ reg |= XVTC_CTL_VBSS;
+ reg |= XVTC_CTL_VSSS;
+ reg |= XVTC_CTL_VFSS;
+ reg |= XVTC_CTL_VTSS;
+ reg |= XVTC_CTL_HBSS;
+ reg |= XVTC_CTL_HSSS;
+ reg |= XVTC_CTL_HFSS;
+ reg |= XVTC_CTL_HTSS;
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg);
+
+ reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+ xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU);
+ dev_dbg(vtc->dev, "set timing done\n");
+
+ return 0;
+}
+
+static int xlnx_vtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xlnx_vtc *vtc;
+ struct resource *res;
+ int ret;
+
+ vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+ if (!vtc)
+ return -ENOMEM;
+
+ vtc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get resource for device\n");
+ return -EFAULT;
+ }
+
+ vtc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vtc->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ return PTR_ERR(vtc->base);
+ }
+
+ platform_set_drvdata(pdev, vtc);
+
+	ret = of_property_read_u32(dev->of_node, "xlnx,pixels-per-clock",
+				   &vtc->ppc);
+	if (ret || (vtc->ppc != 1 && vtc->ppc != 2 && vtc->ppc != 4)) {
+		dev_err(dev, "failed to get ppc\n");
+		return ret ? ret : -EINVAL;
+	}
+ dev_info(dev, "vtc ppc = %d\n", vtc->ppc);
+
+ vtc->axi_clk = devm_clk_get(vtc->dev, "s_axi_aclk");
+ if (IS_ERR(vtc->axi_clk)) {
+ ret = PTR_ERR(vtc->axi_clk);
+ dev_err(dev, "failed to get axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ vtc->vid_clk = devm_clk_get(vtc->dev, "clk");
+ if (IS_ERR(vtc->vid_clk)) {
+ ret = PTR_ERR(vtc->vid_clk);
+ dev_err(dev, "failed to get video clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vtc->axi_clk);
+ if (ret) {
+ dev_err(vtc->dev, "unable to enable axilite clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(vtc->vid_clk);
+ if (ret) {
+ dev_err(vtc->dev, "unable to enable video clk %d\n", ret);
+ goto err_axi_clk;
+ }
+
+ xlnx_vtc_reset(vtc);
+
+ vtc->bridge.enable = &xlnx_vtc_enable;
+ vtc->bridge.disable = &xlnx_vtc_disable;
+ vtc->bridge.set_timing = &xlnx_vtc_set_timing;
+ vtc->bridge.of_node = dev->of_node;
+ ret = xlnx_bridge_register(&vtc->bridge);
+ if (ret) {
+ dev_err(dev, "Bridge registration failed\n");
+ goto err_vid_clk;
+ }
+
+ dev_info(dev, "Xilinx VTC IP version : 0x%08x\n",
+ xlnx_vtc_readl(vtc->base, XVTC_VER));
+ dev_info(dev, "Xilinx VTC DRM Bridge driver probed\n");
+ return 0;
+
+err_vid_clk:
+ clk_disable_unprepare(vtc->vid_clk);
+err_axi_clk:
+ clk_disable_unprepare(vtc->axi_clk);
+ return ret;
+}
+
+static int xlnx_vtc_remove(struct platform_device *pdev)
+{
+ struct xlnx_vtc *vtc = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&vtc->bridge);
+ clk_disable_unprepare(vtc->vid_clk);
+ clk_disable_unprepare(vtc->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_vtc_of_match[] = {
+ { .compatible = "xlnx,bridge-v-tc-6.1" },
+ { /* end of table */ },
+};
+
+MODULE_DEVICE_TABLE(of, xlnx_vtc_of_match);
+
+static struct platform_driver xlnx_vtc_bridge_driver = {
+ .probe = xlnx_vtc_probe,
+ .remove = xlnx_vtc_remove,
+ .driver = {
+ .name = "xlnx,bridge-vtc",
+ .of_match_table = xlnx_vtc_of_match,
+ },
+};
+
+module_platform_driver(xlnx_vtc_bridge_driver);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx VTC Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
new file mode 100644
index 000000000000..1786a70897b5
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -0,0 +1,3333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Display Controller Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_fb.h"
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * This is the display part of the ZynqMP DP subsystem. Internally, the
+ * device is partitioned into 3 blocks: AV buffer manager, Blender, Audio.
+ * The driver creates the DRM crtc and plane objects and maps the DRM
+ * interface onto those 3 blocks. At a high level, the driver is layered
+ * in the following way:
+ *
+ * zynqmp_disp_crtc & zynqmp_disp_plane
+ * |->zynqmp_disp
+ * |->zynqmp_disp_aud
+ * |->zynqmp_disp_blend
+ * |->zynqmp_disp_av_buf
+ *
+ * The driver APIs are used externally by
+ * - zynqmp_dpsub: Top level ZynqMP DP subsystem driver
+ * - zynqmp_dp: ZynqMP DP driver
+ * - xlnx_crtc: Xilinx DRM specific crtc functions
+ */
+
+/* The default value is ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 */
+static uint zynqmp_disp_gfx_init_fmt;
+module_param_named(gfx_init_fmt, zynqmp_disp_gfx_init_fmt, uint, 0444);
+MODULE_PARM_DESC(gfx_init_fmt, "The initial format of the graphics layer\n"
+ "\t\t0 = rgb565 (default)\n"
+ "\t\t1 = rgb888\n"
+ "\t\t2 = argb8888\n");
+/* These values should map to indices of av_buf_gfx_fmts[] */
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 10
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888 5
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888 1
+static const u32 zynqmp_disp_gfx_init_fmts[] = {
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565,
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888,
+ ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888,
+};
+
+/* Blender registers */
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_0 0x0
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_1 0x4
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_2 0x8
+#define ZYNQMP_DISP_V_BLEND_BG_MAX 0xfff
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX 0xff
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT 0x14
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL 0x18
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define ZYNQMP_DISP_V_BLEND_NUM_COEFF 9
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0 0x44
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF1 0x48
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF2 0x4c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF3 0x50
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF4 0x54
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF5 0x58
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF6 0x5c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF7 0x60
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF8 0x64
+#define ZYNQMP_DISP_V_BLEND_NUM_OFFSET 3
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define ZYNQMP_DISP_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define ZYNQMP_DISP_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define ZYNQMP_DISP_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define ZYNQMP_DISP_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0 0x80
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF1 0x84
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF2 0x88
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF3 0x8c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF4 0x90
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF5 0x94
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF6 0x98
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF7 0x9c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF8 0xa0
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define ZYNQMP_DISP_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define ZYNQMP_DISP_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define ZYNQMP_DISP_AV_BUF_FMT 0x0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define ZYNQMP_DISP_AV_BUF_NON_LIVE_LATENCY 0x8
+#define ZYNQMP_DISP_AV_BUF_CHBUF 0x10
+#define ZYNQMP_DISP_AV_BUF_CHBUF_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define ZYNQMP_DISP_AV_BUF_STATUS 0x28
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL 0x2c
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE0 0x30
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE1 0x34
+#define ZYNQMP_DISP_AV_BUF_STC_ADJ 0x38
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT0 0x60
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT1 0x64
+#define ZYNQMP_DISP_AV_BUF_OUTPUT 0x70
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT0 0x74
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT1 0x78
+#define ZYNQMP_DISP_AV_BUF_PATTERN_GEN_SELECT 0x100
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC 0x120
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING BIT(2)
+#define ZYNQMP_DISP_AV_BUF_SRST_REG 0x124
+#define ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST BIT(1)
+#define ZYNQMP_DISP_AV_BUF_AUDIO_CH_CONFIG 0x12c
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF 0x200
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP1_SF 0x204
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP2_SF 0x208
+#define ZYNQMP_DISP_AV_BUF_VID_COMP0_SF 0x20c
+#define ZYNQMP_DISP_AV_BUF_VID_COMP1_SF 0x210
+#define ZYNQMP_DISP_AV_BUF_VID_COMP2_SF 0x214
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF 0x218
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP1_SF 0x21c
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP2_SF 0x220
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG 0x224
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF 0x228
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP1_SF 0x22c
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP2_SF 0x230
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG 0x234
+#define ZYNQMP_DISP_AV_BUF_4BIT_SF 0x11111
+#define ZYNQMP_DISP_AV_BUF_5BIT_SF 0x10842
+#define ZYNQMP_DISP_AV_BUF_6BIT_SF 0x10410
+#define ZYNQMP_DISP_AV_BUF_8BIT_SF 0x10101
+#define ZYNQMP_DISP_AV_BUF_10BIT_SF 0x10040
+#define ZYNQMP_DISP_AV_BUF_NULL_SF 0
+#define ZYNQMP_DISP_AV_BUF_NUM_SF 3
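+/*
+ * The *_SF values above appear to be U1.16 fixed-point scaling factors that
+ * normalize an n-bit component to full scale, i.e. roughly
+ * 2^16 * 2^n / (2^n - 1): e.g. 8-bit gives 65536 * 256 / 255 = 0x10101 and
+ * 10-bit gives 65536 * 1024 / 1023 = 0x10040.
+ */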
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_12 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_MASK GENMASK(2, 0)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB 0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY 0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK GENMASK(5, 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_CB_FIRST BIT(8)
+#define ZYNQMP_DISP_AV_BUF_PALETTE_MEMORY 0x400
+
+/* Audio registers */
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME 0x0
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
+#define ZYNQMP_DISP_AUD_MIXER_META_DATA 0x4
+#define ZYNQMP_DISP_AUD_CH_STATUS0 0x8
+#define ZYNQMP_DISP_AUD_CH_STATUS1 0xc
+#define ZYNQMP_DISP_AUD_CH_STATUS2 0x10
+#define ZYNQMP_DISP_AUD_CH_STATUS3 0x14
+#define ZYNQMP_DISP_AUD_CH_STATUS4 0x18
+#define ZYNQMP_DISP_AUD_CH_STATUS5 0x1c
+#define ZYNQMP_DISP_AUD_CH_A_DATA0 0x20
+#define ZYNQMP_DISP_AUD_CH_A_DATA1 0x24
+#define ZYNQMP_DISP_AUD_CH_A_DATA2 0x28
+#define ZYNQMP_DISP_AUD_CH_A_DATA3 0x2c
+#define ZYNQMP_DISP_AUD_CH_A_DATA4 0x30
+#define ZYNQMP_DISP_AUD_CH_A_DATA5 0x34
+#define ZYNQMP_DISP_AUD_CH_B_DATA0 0x38
+#define ZYNQMP_DISP_AUD_CH_B_DATA1 0x3c
+#define ZYNQMP_DISP_AUD_CH_B_DATA2 0x40
+#define ZYNQMP_DISP_AUD_CH_B_DATA3 0x44
+#define ZYNQMP_DISP_AUD_CH_B_DATA4 0x48
+#define ZYNQMP_DISP_AUD_CH_B_DATA5 0x4c
+#define ZYNQMP_DISP_AUD_SOFT_RESET 0xc00
+#define ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST BIT(0)
+
+#define ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS 4
+#define ZYNQMP_DISP_AV_BUF_NUM_BUFFERS 6
+
+#define ZYNQMP_DISP_NUM_LAYERS 2
+#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES 3
+/*
+ * 3840x2160 is the advertised maximum resolution, but almost any resolution
+ * under a 300MHz pixel rate would work. Thus use 4096 as the maximum width
+ * and height.
+ */
+#define ZYNQMP_DISP_MAX_WIDTH 4096
+#define ZYNQMP_DISP_MAX_HEIGHT 4096
+/* 44-bit addressing. This is actually a DPDMA limitation */
+#define ZYNQMP_DISP_MAX_DMA_BIT 44
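+/*
+ * This limit is presumably applied elsewhere in the driver via something
+ * like dma_set_mask(dev, DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT)).
+ */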
+
+/**
+ * enum zynqmp_disp_layer_type - Layer type (can be used for hw ID)
+ * @ZYNQMP_DISP_LAYER_VID: Video layer
+ * @ZYNQMP_DISP_LAYER_GFX: Graphics layer
+ */
+enum zynqmp_disp_layer_type {
+ ZYNQMP_DISP_LAYER_VID,
+ ZYNQMP_DISP_LAYER_GFX
+};
+
+/**
+ * enum zynqmp_disp_layer_mode - Layer mode
+ * @ZYNQMP_DISP_LAYER_NONLIVE: non-live (memory) mode
+ * @ZYNQMP_DISP_LAYER_LIVE: live (stream) mode
+ */
+enum zynqmp_disp_layer_mode {
+ ZYNQMP_DISP_LAYER_NONLIVE,
+ ZYNQMP_DISP_LAYER_LIVE
+};
+
+/**
+ * struct zynqmp_disp_layer_dma - struct for DMA engine
+ * @chan: DMA channel
+ * @is_active: flag if the DMA is active
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template
+ */
+struct zynqmp_disp_layer_dma {
+ struct dma_chan *chan;
+ bool is_active;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
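+
+/*
+ * Rough sketch of how the template is typically filled for one non-live
+ * sub-plane (the actual assignments live elsewhere in this driver; the @sgl
+ * member right after @xt supplies the storage for xt.sgl[0], and the names
+ * on the right-hand side are placeholders):
+ *
+ * xt.dir = DMA_MEM_TO_DEV; // memory to display
+ * xt.src_start = paddr; // sub-plane start address
+ * xt.numf = height; // number of lines per frame
+ * xt.frame_size = 1; // one chunk per line
+ * xt.sgl[0].size = bytes_per_line; // active bytes per line
+ * xt.sgl[0].icg = stride - bytes_per_line; // inter-line gap
+ */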
+
+/**
+ * struct zynqmp_disp_layer - Display subsystem layer
+ * @plane: DRM plane
+ * @bridge: Xlnx bridge
+ * @of_node: device node
+ * @dma: struct for DMA engine
+ * @num_chan: Number of DMA channel
+ * @id: Layer ID
+ * @offset: Layer offset in the register space
+ * @enabled: flag if enabled
+ * @fmt: Current format descriptor
+ * @drm_fmts: Array of supported DRM formats
+ * @num_fmts: Number of supported DRM formats
+ * @bus_fmts: Array of supported bus formats
+ * @num_bus_fmts: Number of supported bus formats
+ * @w: Width
+ * @h: Height
+ * @mode: the operation mode
+ * @other: other layer
+ * @disp: back pointer to struct zynqmp_disp
+ */
+struct zynqmp_disp_layer {
+ struct drm_plane plane;
+ struct xlnx_bridge bridge;
+ struct device_node *of_node;
+ struct zynqmp_disp_layer_dma dma[ZYNQMP_DISP_MAX_NUM_SUB_PLANES];
+ unsigned int num_chan;
+ enum zynqmp_disp_layer_type id;
+ u32 offset;
+ u8 enabled;
+ const struct zynqmp_disp_fmt *fmt;
+ u32 *drm_fmts;
+ unsigned int num_fmts;
+ u32 *bus_fmts;
+ unsigned int num_bus_fmts;
+ u32 w;
+ u32 h;
+ enum zynqmp_disp_layer_mode mode;
+ struct zynqmp_disp_layer *other;
+ struct zynqmp_disp *disp;
+};
+
+/**
+ * struct zynqmp_disp_blend - Blender
+ * @base: Base address offset
+ */
+struct zynqmp_disp_blend {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_av_buf - AV buffer manager
+ * @base: Base address offset
+ */
+struct zynqmp_disp_av_buf {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_aud - Audio
+ * @base: Base address offset
+ */
+struct zynqmp_disp_aud {
+ void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp - Display subsystem
+ * @xlnx_crtc: Xilinx DRM crtc
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @enabled: flag if enabled
+ * @blend: Blender block
+ * @av_buf: AV buffer manager block
+ * @aud: Audio block
+ * @layers: layers
+ * @g_alpha_prop: global alpha property
+ * @alpha: current global alpha value
+ * @g_alpha_en_prop: the global alpha enable property
+ * @alpha_en: flag if the global alpha is enabled
+ * @color_prop: output color format property
+ * @color: current output color value
+ * @bg_c0_prop: 1st component of background color property
+ * @bg_c0: current value of 1st background color component
+ * @bg_c1_prop: 2nd component of background color property
+ * @bg_c1: current value of 2nd background color component
+ * @bg_c2_prop: 3rd component of background color property
+ * @bg_c2: current value of 3rd background color component
+ * @tpg_prop: Test Pattern Generation mode property
+ * @tpg_on: current TPG mode state
+ * @event: pending vblank event request
+ * @_ps_pclk: Pixel clock from PS
+ * @_pl_pclk: Pixel clock from PL
+ * @pclk: Pixel clock
+ * @pclk_en: Flag if the pixel clock is enabled
+ * @_ps_audclk: Audio clock from PS
+ * @_pl_audclk: Audio clock from PL
+ * @audclk: Audio clock
+ * @audclk_en: Flag if the audio clock is enabled
+ * @aclk: APB clock
+ * @aclk_en: Flag if the APB clock is enabled
+ */
+struct zynqmp_disp {
+ struct xlnx_crtc xlnx_crtc;
+ struct device *dev;
+ struct zynqmp_dpsub *dpsub;
+ struct drm_device *drm;
+ bool enabled;
+ struct zynqmp_disp_blend blend;
+ struct zynqmp_disp_av_buf av_buf;
+ struct zynqmp_disp_aud aud;
+ struct zynqmp_disp_layer layers[ZYNQMP_DISP_NUM_LAYERS];
+ struct drm_property *g_alpha_prop;
+ u32 alpha;
+ struct drm_property *g_alpha_en_prop;
+ bool alpha_en;
+ struct drm_property *color_prop;
+ unsigned int color;
+ struct drm_property *bg_c0_prop;
+ u32 bg_c0;
+ struct drm_property *bg_c1_prop;
+ u32 bg_c1;
+ struct drm_property *bg_c2_prop;
+ u32 bg_c2;
+ struct drm_property *tpg_prop;
+ bool tpg_on;
+ struct drm_pending_vblank_event *event;
+ /* Don't operate directly on the _ps_ and _pl_ clock handles below */
+ struct clk *_ps_pclk;
+ struct clk *_pl_pclk;
+ struct clk *pclk;
+ bool pclk_en;
+ struct clk *_ps_audclk;
+ struct clk *_pl_audclk;
+ struct clk *audclk;
+ bool audclk_en;
+ struct clk *aclk;
+ bool aclk_en;
+};
+
+/**
+ * struct zynqmp_disp_fmt - Display subsystem format mapping
+ * @drm_fmt: drm format
+ * @disp_fmt: Display subsystem format
+ * @bus_fmt: Bus formats (live formats)
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for up to 3 color components
+ */
+struct zynqmp_disp_fmt {
+ u32 drm_fmt;
+ u32 disp_fmt;
+ u32 bus_fmt;
+ bool rgb;
+ bool swap;
+ bool chroma_sub;
+ u32 sf[3];
+};
+
+static void zynqmp_disp_write(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 zynqmp_disp_read(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static void zynqmp_disp_clr(void __iomem *base, int offset, u32 clr)
+{
+ zynqmp_disp_write(base, offset, zynqmp_disp_read(base, offset) & ~clr);
+}
+
+static void zynqmp_disp_set(void __iomem *base, int offset, u32 set)
+{
+ zynqmp_disp_write(base, offset, zynqmp_disp_read(base, offset) | set);
+}
+
+/*
+ * Clock functions
+ */
+
+/**
+ * zynqmp_disp_clk_enable - Enable the clock if needed
+ * @clk: clk device
+ * @flag: flag tracking whether the clock is enabled
+ *
+ * Enable the clock only if it is not already enabled, as tracked by @flag.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable(struct clk *clk, bool *flag)
+{
+ int ret = 0;
+
+ if (!*flag) {
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ *flag = true;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_disp_clk_disable - Disable the clock if needed
+ * @clk: clk device
+ * @flag: flag tracking whether the clock is enabled
+ *
+ * Disable the clock only if it is currently enabled, as tracked by @flag.
+ */
+static void zynqmp_disp_clk_disable(struct clk *clk, bool *flag)
+{
+ if (*flag) {
+ clk_disable_unprepare(clk);
+ *flag = false;
+ }
+}
+
+/**
+ * zynqmp_disp_clk_enable_disable - Enable and then disable the clock
+ * @clk: clk device
+ * @flag: flag tracking whether the clock is enabled
+ *
+ * Enable and immediately disable the clock to bring it into a known,
+ * disabled state, as the initial hardware state is unknown.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable_disable(struct clk *clk, bool *flag)
+{
+ int ret = 0;
+
+ if (!*flag) {
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ clk_disable_unprepare(clk);
+ }
+
+ return ret;
+}
+
+/*
+ * Blender functions
+ */
+
+/**
+ * zynqmp_disp_blend_set_output_fmt - Set the output format of the blend
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt.
+ */
+static void
+zynqmp_disp_blend_set_output_fmt(struct zynqmp_disp_blend *blend, u32 fmt)
+{
+ u16 reset_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u32 reset_offsets[] = { 0x0, 0x0, 0x0 };
+ u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+ 0x7d4d, 0x7ab3, 0x800,
+ 0x800, 0x794d, 0x7eb3 };
+ u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+ u16 *coeffs;
+ u32 *offsets;
+ u32 offset, i;
+
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
+ if (fmt == ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB) {
+ coeffs = reset_coeffs;
+ offsets = reset_offsets;
+ } else {
+ /* Hardcode full-range SDTV values. Could be made runtime configurable */
+ coeffs = sdtv_coeffs;
+ offsets = full_range_offsets;
+ }
+
+ offset = ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0;
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+ offset = ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET;
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
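+
+/*
+ * The CSC coefficient tables above and below appear to use a Q3.12
+ * fixed-point format where 0x1000 represents 1.0, with negative values
+ * encoded as two's complement. E.g. in the SDTV YCbCr->RGB matrix,
+ * 0x166f ~= 1.402, 0x1c5a ~= 1.772 and 0x7a7f ~= -0.344, which match the
+ * BT.601 conversion coefficients.
+ */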
+
+/**
+ * zynqmp_disp_blend_layer_coeff - Set the coefficients for @layer
+ * @blend: blend object
+ * @layer: layer to set the coefficients for
+ * @on: if layer is on / off
+ *
+ * Depending on the format (rgb / yuv and swap), and the status (on / off),
+ * this function sets the coefficients for the given layer @layer accordingly.
+ */
+static void zynqmp_disp_blend_layer_coeff(struct zynqmp_disp_blend *blend,
+ struct zynqmp_disp_layer *layer,
+ bool on)
+{
+ u32 offset, i, s0, s1;
+ u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+ 0x1000, 0x7483, 0x7a7f,
+ 0x1000, 0x0, 0x1c5a };
+ u16 sdtv_coeffs_yonly[] = { 0x0, 0x0, 0x1000,
+ 0x0, 0x0, 0x1000,
+ 0x0, 0x0, 0x1000 };
+ u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+ 0x0, 0x1000, 0x0,
+ 0x0, 0x0, 0x1000 };
+ u16 null_coeffs[] = { 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0 };
+ u16 *coeffs;
+ u32 sdtv_offsets[] = { 0x0, 0x1800, 0x1800 };
+ u32 sdtv_offsets_yonly[] = { 0x1800, 0x1800, 0x0 };
+ u32 null_offsets[] = { 0x0, 0x0, 0x0 };
+ u32 *offsets;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_VID)
+ offset = ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0;
+ else
+ offset = ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0;
+
+ if (!on) {
+ coeffs = null_coeffs;
+ offsets = null_offsets;
+ } else {
+ if (!layer->fmt->rgb) {
+ /*
+ * In case of Y_ONLY formats, pixels are unpacked
+ * differently compared to YCbCr
+ */
+ if (layer->fmt->drm_fmt == DRM_FORMAT_Y8 ||
+ layer->fmt->drm_fmt == DRM_FORMAT_Y10) {
+ coeffs = sdtv_coeffs_yonly;
+ offsets = sdtv_offsets_yonly;
+ } else {
+ coeffs = sdtv_coeffs;
+ offsets = sdtv_offsets;
+ }
+
+ s0 = 1;
+ s1 = 2;
+ } else {
+ coeffs = swap_coeffs;
+ s0 = 0;
+ s1 = 2;
+
+ /* No offset for RGB formats */
+ offsets = null_offsets;
+ }
+
+ if (layer->fmt->swap) {
+ /* Swap two columns of the 3x3 coefficient matrix */
+ for (i = 0; i < 3; i++)
+ swap(coeffs[i * 3 + s0], coeffs[i * 3 + s1]);
+ }
+ }
+
+ /* Program coefficients. Could be made runtime configurable */
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+ if (layer->id == ZYNQMP_DISP_LAYER_VID)
+ offset = ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET;
+ else
+ offset = ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+ /* Program offsets. Could be made runtime configurable */
+ for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+ zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Enable a layer @layer.
+ */
+static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
+ struct zynqmp_disp_layer *layer)
+{
+ u32 reg;
+
+ reg = layer->fmt->rgb ? ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB : 0;
+ reg |= layer->fmt->chroma_sub ?
+ ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US : 0;
+
+ zynqmp_disp_write(blend->base,
+ ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset,
+ reg);
+
+ zynqmp_disp_blend_layer_coeff(blend, layer, true);
+}
+
+/**
+ * zynqmp_disp_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Disable a layer @layer.
+ */
+static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp_blend *blend,
+ struct zynqmp_disp_layer *layer)
+{
+ zynqmp_disp_write(blend->base,
+ ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset, 0);
+
+ zynqmp_disp_blend_layer_coeff(blend, layer, false);
+}
+
+/**
+ * zynqmp_disp_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color.
+ */
+static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp_blend *blend,
+ u32 c0, u32 c1, u32 c2)
+{
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_BG_CLR_0, c0);
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_BG_CLR_1, c1);
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_BG_CLR_2, c2);
+}
+
+/**
+ * zynqmp_disp_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Set the alpha for blending.
+ */
+static void
+zynqmp_disp_blend_set_alpha(struct zynqmp_disp_blend *blend, u32 alpha)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(blend->base,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA);
+ reg &= ~ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK;
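+ /*
+ * The alpha value appears to occupy the bits above bit 0 (hence the
+ * << 1 below); bit 0 is the global alpha enable, toggled separately
+ * in zynqmp_disp_blend_enable_alpha().
+ */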
+ reg |= alpha << 1;
+ zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
+ reg);
+}
+
+/**
+ * zynqmp_disp_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable/disable the global alpha blending based on @enable.
+ */
+static void
+zynqmp_disp_blend_enable_alpha(struct zynqmp_disp_blend *blend, bool enable)
+{
+ if (enable)
+ zynqmp_disp_set(blend->base,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+ else
+ zynqmp_disp_clr(blend->base,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+/*
+ * List of blend output formats.
+ * The id / order should be aligned with zynqmp_disp_color_enum.
+ */
+static const struct zynqmp_disp_fmt blend_output_fmts[] = {
+ {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB,
+ }, {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+ }, {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+ }, {
+ .disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY,
+ }
+};
+
+/*
+ * AV buffer manager functions
+ */
+
+/* List of video layer formats */
+#define ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV 2
+static const struct zynqmp_disp_fmt av_buf_vid_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_VYUY,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_UYVY,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUYV,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVYU,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV422,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU422,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV16,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV61,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_Y8,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_Y10,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XBGR2101010,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XRGB2101010,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YUV420,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_YVU420,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV12,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_NV21,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+ .rgb = false,
+ .swap = true,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XV15,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_XV20,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10,
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }
+};
+
+/* List of graphics layer formats */
+static const struct zynqmp_disp_fmt av_buf_gfx_fmts[] = {
+ {
+ .drm_fmt = DRM_FORMAT_ABGR8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_ARGB8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA8888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB888,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA5551,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA5551,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGBA4444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGRA4444,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_RGB565,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }, {
+ .drm_fmt = DRM_FORMAT_BGR565,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+ .rgb = true,
+ .swap = true,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+ }
+};
+
+/*
+ * List of live formats.
+ * A format is a combination of color, bpc, and cb-cr order:
+ * - Color: RGB / YUV444 / YUV422 / Y only
+ * - BPC: 6, 8, 10, 12
+ * - Swap: Cb and Cr swap
+ * which yields up to 32 bus formats. Only a subset of those is listed for now.
+ */
+static const struct zynqmp_disp_fmt av_buf_live_fmts[] = {
+ {
+ .bus_fmt = MEDIA_BUS_FMT_RGB666_1X18,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 |
+ (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB << 4),
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_RBG888_1X24,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB << 4),
+ .rgb = true,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_UYVY8_1X16,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 << 4),
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_VUY8_1X24,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+ (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 << 4),
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = false,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+ }, {
+ .bus_fmt = MEDIA_BUS_FMT_UYVY10_1X20,
+ .disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 |
+ (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 << 4),
+ .rgb = false,
+ .swap = false,
+ .chroma_sub = true,
+ .sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ .sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+ }
+};
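+
+/*
+ * Note: the .disp_fmt packing above assumes, per the register mask defines,
+ * that the live config register keeps BPC in bits [2:0] (BPC_MASK is
+ * GENMASK(2, 0)) and the color format in bits [5:4] (FMT_MASK is
+ * GENMASK(5, 4)), hence the explicit << 4 shift on the FMT values.
+ */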
+
+/**
+ * zynqmp_disp_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: formats
+ *
+ * Set the av buffer manager format to @fmt. @fmt should contain valid values
+ * for both the video and graphics layers.
+ */
+static void
+zynqmp_disp_av_buf_set_fmt(struct zynqmp_disp_av_buf *av_buf, u32 fmt)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (which include video and graphics) of the
+ * av buffer manager.
+ *
+ * Return: value of ZYNQMP_DISP_AV_BUF_FMT register.
+ */
+static u32
+zynqmp_disp_av_buf_get_fmt(struct zynqmp_disp_av_buf *av_buf)
+{
+ return zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_live_fmt - Set the live input format
+ * @av_buf: av buffer manager
+ * @fmt: format
+ * @is_vid: flag if this is for the video layer
+ *
+ * Set the live input format to @fmt, which should be a valid hardware value.
+ * @is_vid determines whether the format applies to the video layer or the
+ * graphics layer.
+ */
+static void zynqmp_disp_av_buf_set_live_fmt(struct zynqmp_disp_av_buf *av_buf,
+ u32 fmt, bool is_vid)
+{
+ u32 offset;
+
+ if (is_vid)
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG;
+ else
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG;
+
+ zynqmp_disp_write(av_buf->base, offset, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock is from ps
+ *
+ * Set the video clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_clock_src(struct zynqmp_disp_av_buf *av_buf,
+ bool from_ps)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ if (from_ps)
+ reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_clock_src_is_ps - Check if the PS video clock is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if the video clock comes from the PS, or false.
+ */
+static bool
+zynqmp_disp_av_buf_vid_clock_src_is_ps(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ return !!(reg & ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Set the video timing source based on @internal. It can come from an
+ * external source or be generated internally.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_timing_src(struct zynqmp_disp_av_buf *av_buf,
+ bool internal)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ if (internal)
+ reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_timing_src_is_int - Check if internal timing is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if the internal video timing is used, or false.
+ */
+static bool
+zynqmp_disp_av_buf_vid_timing_src_is_int(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ return !!(reg & ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock is from the PS
+ *
+ * Set the audio clock source based on @from_ps. It can come from either PS or
+ * PL.
+ */
+static void
+zynqmp_disp_av_buf_set_aud_clock_src(struct zynqmp_disp_av_buf *av_buf,
+ bool from_ps)
+{
+ u32 reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC);
+
+ if (from_ps)
+ reg |= ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable all (video and audio) buffers.
+ */
+static void
+zynqmp_disp_av_buf_enable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+ reg |= ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX <<
+ ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+ reg |= ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+ ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+ for (; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Disable all (video and audio) buffers.
+ */
+static void
+zynqmp_disp_av_buf_disable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ /* Flush each channel buffer while leaving it disabled */
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Enable the audio output channels.
+ */
+static void
+zynqmp_disp_av_buf_enable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset
+ */
+static void
+zynqmp_disp_av_buf_enable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset
+ */
+static void
+zynqmp_disp_av_buf_disable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG,
+ ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable the audio output channels.
+ */
+static void
+zynqmp_disp_av_buf_disable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE;
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_tpg - Set TPG mode
+ * @av_buf: av buffer manager
+ * @tpg_on: if TPG should be on
+ *
+ * Set the TPG mode based on @tpg_on.
+ */
+static void zynqmp_disp_av_buf_set_tpg(struct zynqmp_disp_av_buf *av_buf,
+ bool tpg_on)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ if (tpg_on)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+ else
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ * @mode: operation mode of layer
+ *
+ * Enable the video/graphics buffer for @layer.
+ */
+static void zynqmp_disp_av_buf_enable_vid(struct zynqmp_disp_av_buf *av_buf,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
+ else
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE;
+ } else {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+ if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
+ else
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
+ }
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer for @layer.
+ */
+static void
+zynqmp_disp_av_buf_disable_vid(struct zynqmp_disp_av_buf *av_buf,
+ struct zynqmp_disp_layer *layer)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE;
+ } else {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE;
+ }
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Initialize scaling factors for both video and graphics layers.
+ * If the format descriptor is NULL, the function skips the programming.
+ */
+static void zynqmp_disp_av_buf_init_sf(struct zynqmp_disp_av_buf *av_buf,
+ const struct zynqmp_disp_fmt *vid_fmt,
+ const struct zynqmp_disp_fmt *gfx_fmt)
+{
+ unsigned int i;
+ u32 offset;
+
+ if (gfx_fmt) {
+ offset = ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+ zynqmp_disp_write(av_buf->base, offset + i * 4,
+ gfx_fmt->sf[i]);
+ }
+
+ if (vid_fmt) {
+ offset = ZYNQMP_DISP_AV_BUF_VID_COMP0_SF;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+ zynqmp_disp_write(av_buf->base, offset + i * 4,
+ vid_fmt->sf[i]);
+ }
+}
+
+/**
+ * zynqmp_disp_av_buf_init_live_sf - Initialize scaling factors for live source
+ * @av_buf: av buffer manager
+ * @fmt: format descriptor
+ * @is_vid: flag if this is for video layer
+ *
+ * Initialize scaling factors for live source.
+ */
+static void zynqmp_disp_av_buf_init_live_sf(struct zynqmp_disp_av_buf *av_buf,
+ const struct zynqmp_disp_fmt *fmt,
+ bool is_vid)
+{
+ unsigned int i;
+ u32 offset;
+
+ if (is_vid)
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF;
+ else
+ offset = ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF;
+
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++)
+ zynqmp_disp_write(av_buf->base, offset + i * 4,
+ fmt->sf[i]);
+}
+
+/*
+ * Audio functions
+ */
+
+/**
+ * zynqmp_disp_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with the default mixer volume. De-asserting the soft
+ * reset initializes the audio state.
+ */
+static void zynqmp_disp_aud_init(struct zynqmp_disp_aud *aud)
+{
+ /* Clear the audio soft reset register as it's a non-reset flop */
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * zynqmp_disp_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio in reset.
+ */
+static void zynqmp_disp_aud_deinit(struct zynqmp_disp_aud *aud)
+{
+ zynqmp_disp_set(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/*
+ * ZynqMP Display layer functions
+ */
+
+/**
+ * zynqmp_disp_layer_check_size - Verify width and height for the layer
+ * @disp: Display subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The Display subsystem has the limitation that both layers must have an
+ * identical size. This function verifies that the given size matches the
+ * other enabled layer, and stores the width and height of @layer on success.
+ *
+ * Return: 0 on success, or -EINVAL if the width and/or height is invalid.
+ */
+static int zynqmp_disp_layer_check_size(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ u32 width, u32 height)
+{
+ struct zynqmp_disp_layer *other = layer->other;
+
+ if (other->enabled && (other->w != width || other->h != height)) {
+ dev_err(disp->dev, "Layer width:height must be %d:%d\n",
+ other->w, other->h);
+ return -EINVAL;
+ }
+
+ layer->w = width;
+ layer->h = height;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_map_fmt - Find the Display subsystem format for a given DRM format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Search a Display subsystem format corresponding to the given DRM format
+ * @drm_fmt, and return the format descriptor which contains the Display
+ * subsystem format value.
+ *
+ * Return: a Display subsystem format descriptor on success, or NULL.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_fmt(const struct zynqmp_disp_fmt fmts[],
+ unsigned int size, uint32_t drm_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (fmts[i].drm_fmt == drm_fmt)
+ return &fmts[i];
+
+ return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_fmt - Set the format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Set the format of the given layer to @drm_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_fmt(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ uint32_t drm_fmt)
+{
+ const struct zynqmp_disp_fmt *fmt;
+ const struct zynqmp_disp_fmt *vid_fmt = NULL, *gfx_fmt = NULL;
+ u32 size, fmts, mask;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK;
+ fmt = zynqmp_disp_map_fmt(av_buf_vid_fmts, size, drm_fmt);
+ vid_fmt = fmt;
+ } else {
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
+ fmt = zynqmp_disp_map_fmt(av_buf_gfx_fmts, size, drm_fmt);
+ gfx_fmt = fmt;
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmts = zynqmp_disp_av_buf_get_fmt(&disp->av_buf);
+ fmts &= mask;
+ fmts |= fmt->disp_fmt;
+ zynqmp_disp_av_buf_set_fmt(&disp->av_buf, fmts);
+ zynqmp_disp_av_buf_init_sf(&disp->av_buf, vid_fmt, gfx_fmt);
+ layer->fmt = fmt;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_map_live_fmt - Find the hardware format for a given bus format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @bus_fmt: bus format to search
+ *
+ * Search a Display subsystem format corresponding to the given bus format
+ * @bus_fmt, and return the format descriptor which contains the Display
+ * subsystem format value.
+ *
+ * Return: a Display subsystem format descriptor on success, or NULL.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_live_fmt(const struct zynqmp_disp_fmt fmts[],
+ unsigned int size, uint32_t bus_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (fmts[i].bus_fmt == bus_fmt)
+ return &fmts[i];
+
+ return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_live_fmt - Set the live format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @bus_fmt: bus format to set
+ *
+ * Set the live format of the given layer to @bus_fmt.
+ *
+ * Return: 0 on success. -EINVAL if @bus_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_live_fmt(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ uint32_t bus_fmt)
+{
+ const struct zynqmp_disp_fmt *fmt;
+ u32 size;
+ bool is_vid = layer->id == ZYNQMP_DISP_LAYER_VID;
+
+ size = ARRAY_SIZE(av_buf_live_fmts);
+ fmt = zynqmp_disp_map_live_fmt(av_buf_live_fmts, size, bus_fmt);
+ if (!fmt)
+ return -EINVAL;
+
+ zynqmp_disp_av_buf_set_live_fmt(&disp->av_buf, fmt->disp_fmt, is_vid);
+ zynqmp_disp_av_buf_init_live_sf(&disp->av_buf, fmt, is_vid);
+ layer->fmt = fmt;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_set_tpg - Enable or disable TPG
+ * @disp: Display subsystem
+ * @layer: Video layer
+ * @tpg_on: flag if TPG needs to be enabled or disabled
+ *
+ * Enable or disable the TPG mode on the video layer @layer depending on
+ * @tpg_on. The video layer should be disabled prior to an enable request.
+ *
+ * Return: 0 on success. -ENODEV if @layer is not the video layer. -EIO if
+ * the video layer is still enabled.
+ */
+static int zynqmp_disp_layer_set_tpg(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ bool tpg_on)
+{
+ if (layer->id != ZYNQMP_DISP_LAYER_VID) {
+ dev_err(disp->dev,
+ "only the video layer has the tpg mode\n");
+ return -ENODEV;
+ }
+
+ if (layer->enabled) {
+ dev_err(disp->dev,
+ "the video layer should be disabled for tpg mode\n");
+ return -EIO;
+ }
+
+ zynqmp_disp_blend_layer_coeff(&disp->blend, layer, tpg_on);
+ zynqmp_disp_av_buf_set_tpg(&disp->av_buf, tpg_on);
+ disp->tpg_on = tpg_on;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_get_tpg - Get the TPG mode status
+ * @disp: Display subsystem
+ * @layer: Video layer
+ *
+ * Return if the TPG is enabled or not.
+ *
+ * Return: true if TPG is on, otherwise false
+ */
+static bool zynqmp_disp_layer_get_tpg(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer)
+{
+ return disp->tpg_on;
+}
+
+/**
+ * zynqmp_disp_layer_get_fmts - Get the supported DRM formats of the layer
+ * @disp: Display subsystem
+ * @layer: layer to get the formats from
+ * @drm_fmts: pointer to the returned array of DRM format codes
+ * @num_fmts: pointer to number of returned DRM formats
+ *
+ * Get the supported DRM formats of the given layer.
+ */
+static void zynqmp_disp_layer_get_fmts(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ u32 **drm_fmts, unsigned int *num_fmts)
+{
+ *drm_fmts = layer->drm_fmts;
+ *num_fmts = layer->num_fmts;
+}
+
+/**
+ * zynqmp_disp_layer_enable - Enable the layer
+ * @disp: Display subsystem
+ * @layer: layer to enable
+ * @mode: operation mode
+ *
+ * Enable the layer @layer.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+static int zynqmp_disp_layer_enable(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ struct device *dev = disp->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ if (layer->enabled && layer->mode != mode) {
+ dev_err(dev, "layer is already enabled in different mode\n");
+ return -EBUSY;
+ }
+
+ zynqmp_disp_av_buf_enable_vid(&disp->av_buf, layer, mode);
+ zynqmp_disp_blend_layer_enable(&disp->blend, layer);
+
+ layer->enabled = true;
+ layer->mode = mode;
+
+ if (mode == ZYNQMP_DISP_LAYER_LIVE)
+ return 0;
+
+ for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++) {
+ struct zynqmp_disp_layer_dma *dma = &layer->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt, flags);
+ if (!desc) {
+ dev_err(dev, "failed to prep DMA descriptor\n");
+ return -ENOMEM;
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+
+ return 0;
+}
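+
+/*
+ * Rough usage sketch for a non-live layer (the actual call sites are in the
+ * DRM plane / CRTC code elsewhere in this driver; the format and the field
+ * fill-in below are placeholders):
+ *
+ * zynqmp_disp_layer_set_fmt(disp, layer, DRM_FORMAT_NV12);
+ * // fill layer->dma[i].xt and sgl[0] per sub-plane; set is_active
+ * zynqmp_disp_layer_enable(disp, layer, ZYNQMP_DISP_LAYER_NONLIVE);
+ */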
+
+/**
+ * zynqmp_disp_layer_disable - Disable the layer
+ * @disp: Display subsystem
+ * @layer: layer to disable
+ * @mode: operation mode
+ *
+ * Disable the layer @layer.
+ *
+ * Return: 0 on success, or -EBUSY if the layer is in a different mode.
+ */
+static int zynqmp_disp_layer_disable(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ struct device *dev = disp->dev;
+ unsigned int i;
+
+ if (layer->mode != mode) {
+ dev_err(dev, "the layer is operating in different mode\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+ if (layer->dma[i].chan && layer->dma[i].is_active)
+ dmaengine_terminate_sync(layer->dma[i].chan);
+
+ zynqmp_disp_av_buf_disable_vid(&disp->av_buf, layer);
+ zynqmp_disp_blend_layer_disable(&disp->blend, layer);
+ layer->enabled = false;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_request_dma - Request DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to request DMA channels
+ * @name: identifier string for layer type
+ *
+ * Request DMA engine channels for corresponding layer.
+ *
+ * Return: 0 on success, or err value from of_dma_request_slave_channel().
+ */
+static int
+zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer, const char *name)
+{
+ struct zynqmp_disp_layer_dma *dma;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < layer->num_chan; i++) {
+ char temp[16];
+
+ dma = &layer->dma[i];
+ snprintf(temp, sizeof(temp), "%s%d", name, i);
+ dma->chan = of_dma_request_slave_channel(layer->of_node,
+ temp);
+ if (IS_ERR(dma->chan)) {
+ dev_err(disp->dev, "failed to request dma channel\n");
+ ret = PTR_ERR(dma->chan);
+ dma->chan = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_release_dma - Release DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to release DMA channels
+ *
+ * Release the dma channels associated with @layer.
+ */
+static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer)
+{
+ unsigned int i;
+
+ for (i = 0; i < layer->num_chan; i++) {
+ if (layer->dma[i].chan) {
+ /* Make sure the channel is terminated before release */
+ dmaengine_terminate_all(layer->dma[i].chan);
+ dma_release_channel(layer->dma[i].chan);
+ }
+ }
+}
+
+/**
+ * zynqmp_disp_layer_is_live - Check if any layer is live
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is in live mode, or false.
+ */
+static bool zynqmp_disp_layer_is_live(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ if (disp->layers[i].enabled &&
+ disp->layers[i].mode == ZYNQMP_DISP_LAYER_LIVE)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zynqmp_disp_layer_is_enabled - Check if any layer is enabled
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is enabled, or false.
+ */
+static bool zynqmp_disp_layer_is_enabled(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ if (disp->layers[i].enabled)
+ return true;
+
+ return false;
+}
+
+/**
+ * zynqmp_disp_layer_destroy - Destroy all layers
+ * @disp: Display subsystem
+ *
+ * Destroy all layers.
+ */
+static void zynqmp_disp_layer_destroy(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ zynqmp_disp_layer_release_dma(disp, &disp->layers[i]);
+ if (disp->layers[i].of_node)
+ of_node_put(disp->layers[i].of_node);
+ }
+}
+
+/**
+ * zynqmp_disp_layer_create - Create all layers
+ * @disp: Display subsystem
+ *
+ * Create all layers.
+ *
+ * Return: 0 on success, otherwise error code from failed function
+ */
+static int zynqmp_disp_layer_create(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+ int num_chans[ZYNQMP_DISP_NUM_LAYERS] = { 3, 1 };
+ const char * const dma_name[] = { "vid", "gfx" };
+ int ret;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ char temp[16];
+
+ layer = &disp->layers[i];
+ layer->id = i;
+ layer->offset = i * 4;
+ layer->other = &disp->layers[!i];
+ layer->num_chan = num_chans[i];
+ snprintf(temp, sizeof(temp), "%s-layer", dma_name[i]);
+ layer->of_node = of_get_child_by_name(disp->dev->of_node, temp);
+ if (!layer->of_node) {
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = zynqmp_disp_layer_request_dma(disp, layer, dma_name[i]);
+ if (ret)
+ goto err;
+ layer->disp = disp;
+ }
+
+ return 0;
+
+err:
+ zynqmp_disp_layer_destroy(disp);
+ return ret;
+}
+
+/*
+ * ZynqMP Display internal functions
+ */
+
+/*
+ * Output format enumeration.
+ * The ID should be aligned with blend_output_fmts.
+ * The string should be aligned with how zynqmp_dp_set_color() decodes.
+ */
+static struct drm_prop_enum_list zynqmp_disp_color_enum[] = {
+ { 0, "rgb" },
+ { 1, "ycrcb444" },
+ { 2, "ycrcb422" },
+ { 3, "yonly" },
+};
+
+/**
+ * zynqmp_disp_set_output_fmt - Set the output format
+ * @disp: Display subsystem
+ * @id: the format ID. Refer to zynqmp_disp_color_enum[].
+ *
+ * This function sets the output format of the display / blender as well as
+ * the format of DP controller. The @id should be aligned with
+ * zynqmp_disp_color_enum.
+ */
+static void
+zynqmp_disp_set_output_fmt(struct zynqmp_disp *disp, unsigned int id)
+{
+ const struct zynqmp_disp_fmt *fmt = &blend_output_fmts[id];
+
+ zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[id].name);
+ zynqmp_disp_blend_set_output_fmt(&disp->blend, fmt->disp_fmt);
+}
+
+/**
+ * zynqmp_disp_set_bg_color - Set the background color
+ * @disp: Display subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the background color with given color components (@c0, @c1, @c2).
+ */
+static void zynqmp_disp_set_bg_color(struct zynqmp_disp *disp,
+ u32 c0, u32 c1, u32 c2)
+{
+ zynqmp_disp_blend_set_bg_color(&disp->blend, c0, c1, c2);
+}
+
+/**
+ * zynqmp_disp_set_alpha - Set the alpha value
+ * @disp: Display subsystem
+ * @alpha: alpha value to set
+ *
+ * Set the alpha value for blending.
+ */
+static void zynqmp_disp_set_alpha(struct zynqmp_disp *disp, u32 alpha)
+{
+ disp->alpha = alpha;
+ zynqmp_disp_blend_set_alpha(&disp->blend, alpha);
+}
+
+/**
+ * zynqmp_disp_get_alpha - Get the alpha value
+ * @disp: Display subsystem
+ *
+ * Get the alpha value for blending.
+ *
+ * Return: current alpha value.
+ */
+static u32 zynqmp_disp_get_alpha(struct zynqmp_disp *disp)
+{
+ return disp->alpha;
+}
+
+/**
+ * zynqmp_disp_set_g_alpha - Enable/disable the global alpha blending
+ * @disp: Display subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable or disable the global alpha blending based on @enable.
+ */
+static void zynqmp_disp_set_g_alpha(struct zynqmp_disp *disp, bool enable)
+{
+ disp->alpha_en = enable;
+ zynqmp_disp_blend_enable_alpha(&disp->blend, enable);
+}
+
+/**
+ * zynqmp_disp_get_g_alpha - Get the global alpha status
+ * @disp: Display subsystem
+ *
+ * Get the global alpha status.
+ *
+ * Return: true if global alpha is enabled, or false.
+ */
+static bool zynqmp_disp_get_g_alpha(struct zynqmp_disp *disp)
+{
+ return disp->alpha_en;
+}
+
+/**
+ * zynqmp_disp_enable - Enable the Display subsystem
+ * @disp: Display subsystem
+ *
+ * Enable the Display subsystem.
+ */
+static void zynqmp_disp_enable(struct zynqmp_disp *disp)
+{
+ bool live;
+
+ if (disp->enabled)
+ return;
+
+ zynqmp_disp_av_buf_enable(&disp->av_buf);
+ /* Choose clock source based on the DT clock handle */
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, !!disp->_ps_pclk);
+ zynqmp_disp_av_buf_set_aud_clock_src(&disp->av_buf, !!disp->_ps_audclk);
+ live = zynqmp_disp_layer_is_live(disp);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, !live);
+ zynqmp_disp_av_buf_enable_buf(&disp->av_buf);
+ zynqmp_disp_av_buf_enable_aud(&disp->av_buf);
+ zynqmp_disp_aud_init(&disp->aud);
+ disp->enabled = true;
+}
+
+/**
+ * zynqmp_disp_disable - Disable the Display subsystem
+ * @disp: Display subsystem
+ * @force: flag to disable forcefully
+ *
+ * Disable the Display subsystem.
+ */
+static void zynqmp_disp_disable(struct zynqmp_disp *disp, bool force)
+{
+ struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+ if (!force && (!disp->enabled || zynqmp_disp_layer_is_enabled(disp)))
+ return;
+
+ zynqmp_disp_aud_deinit(&disp->aud);
+ zynqmp_disp_av_buf_disable_aud(&disp->av_buf);
+ zynqmp_disp_av_buf_disable_buf(&disp->av_buf);
+ zynqmp_disp_av_buf_disable(&disp->av_buf);
+
+ /* Signal that the flip is done, as the CRTC is disabled anyway */
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+
+ disp->enabled = false;
+}
+
+/**
+ * zynqmp_disp_init - Initialize the Display subsystem states
+ * @disp: Display subsystem
+ *
+ * Some registers do not reset to the desired state. For example, the output
+ * select register resets to the live source. Initialize such registers to
+ * the desired state here.
+ */
+static void zynqmp_disp_init(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ layer = &disp->layers[i];
+ zynqmp_disp_av_buf_disable_vid(&disp->av_buf, layer);
+ }
+}
+
+/*
+ * ZynqMP Display external functions for zynqmp_dp
+ */
+
+/**
+ * zynqmp_disp_handle_vblank - Handle the vblank event
+ * @disp: Display subsystem
+ *
+ * This function handles the vblank interrupt and sends a vblank event to the
+ * CRTC. It is called from the DP vblank interrupt handler.
+ */
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp)
+{
+ struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+ drm_crtc_handle_vblank(crtc);
+}
+
+/**
+ * zynqmp_disp_get_apb_clk_rate - Get the current APB clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current APB clock rate.
+ */
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp)
+{
+ return clk_get_rate(disp->aclk);
+}
+
+/**
+ * zynqmp_disp_aud_enabled - Check if audio is enabled
+ * @disp: Display subsystem
+ *
+ * Report whether audio is enabled, based on whether an audio clock is present.
+ *
+ * Return: true if audio is enabled, or false.
+ */
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp)
+{
+ return !!disp->audclk;
+}
+
+/**
+ * zynqmp_disp_get_aud_clk_rate - Get the current audio clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current audio clock rate, or 0 if audio is disabled.
+ */
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp)
+{
+ if (!zynqmp_disp_aud_enabled(disp))
+ return 0;
+ return clk_get_rate(disp->audclk);
+}
+
+/**
+ * zynqmp_disp_get_crtc_mask - Return the CRTC bit mask
+ * @disp: Display subsystem
+ *
+ * Return: the crtc mask of the zynqmp_disp CRTC.
+ */
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp)
+{
+ return drm_crtc_mask(&disp->xlnx_crtc.crtc);
+}
+
+/*
+ * Xlnx bridge functions
+ */
+
+static inline struct zynqmp_disp_layer
+*bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct zynqmp_disp_layer, bridge);
+}
+
+static int zynqmp_disp_bridge_enable(struct xlnx_bridge *bridge)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret;
+
+ if (!disp->_pl_pclk) {
+ dev_err(disp->dev, "PL clock is required for live\n");
+ return -ENODEV;
+ }
+
+ ret = zynqmp_disp_layer_check_size(disp, layer, layer->w, layer->h);
+ if (ret)
+ return ret;
+
+ zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+ zynqmp_disp_set_alpha(disp, disp->alpha);
+ ret = zynqmp_disp_layer_enable(layer->disp, layer,
+ ZYNQMP_DISP_LAYER_LIVE);
+ if (ret)
+ return ret;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+ }
+
+ if (zynqmp_disp_av_buf_vid_timing_src_is_int(&disp->av_buf) ||
+ zynqmp_disp_av_buf_vid_clock_src_is_ps(&disp->av_buf)) {
+ dev_info(disp->dev,
+ "Disabling the pipeline to change the clk/timing src");
+ zynqmp_disp_disable(disp, true);
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, false);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, false);
+ }
+
+ zynqmp_disp_enable(disp);
+
+ return 0;
+}
+
+static void zynqmp_disp_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ struct zynqmp_disp *disp = layer->disp;
+
+ zynqmp_disp_disable(disp, false);
+
+ zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_LIVE);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+ if (!zynqmp_disp_layer_is_live(disp)) {
+ dev_info(disp->dev,
+ "Disabling the pipeline to change the clk/timing src");
+ zynqmp_disp_disable(disp, true);
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, true);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, true);
+ if (zynqmp_disp_layer_is_enabled(disp))
+ zynqmp_disp_enable(disp);
+ }
+}
+
+static int zynqmp_disp_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ int ret;
+
+ ret = zynqmp_disp_layer_check_size(layer->disp, layer, width, height);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_disp_layer_set_live_fmt(layer->disp, layer, bus_fmt);
+ if (ret)
+ dev_err(layer->disp->dev, "failed to set live fmt\n");
+
+ return ret;
+}
+
+static int zynqmp_disp_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+
+ *fmts = layer->bus_fmts;
+ *count = layer->num_bus_fmts;
+
+ return 0;
+}
+
+/*
+ * DRM plane functions
+ */
+
+static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane)
+{
+ return container_of(plane, struct zynqmp_disp_layer, plane);
+}
+
+static int zynqmp_disp_plane_enable(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret;
+
+ zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+ zynqmp_disp_set_alpha(disp, disp->alpha);
+ ret = zynqmp_disp_layer_enable(layer->disp, layer,
+ ZYNQMP_DISP_LAYER_NONLIVE);
+ if (ret)
+ return ret;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+ }
+
+ return 0;
+}
+
+static int zynqmp_disp_plane_disable(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+
+ zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_NONLIVE);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+ return 0;
+}
+
+static int zynqmp_disp_plane_mode_set(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ const struct drm_format_info *info = fb->format;
+ struct device *dev = layer->disp->dev;
+ dma_addr_t paddr;
+ unsigned int i;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "No format info found\n");
+ return -EINVAL;
+ }
+
+ ret = zynqmp_disp_layer_check_size(layer->disp, layer, src_w, src_h);
+ if (ret)
+ return ret;
+
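+ /*
+ * Program one DMA descriptor per sub-plane. Chroma planes (i > 0)
+ * are subsampled by hsub and vsub.
+ */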
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = src_w / (i ? info->hsub : 1);
+ unsigned int height = src_h / (i ? info->vsub : 1);
+ int width_bytes;
+
+ paddr = drm_fb_cma_get_gem_addr(fb, plane->state, i);
+ if (!paddr) {
+ dev_err(dev, "failed to get a paddr\n");
+ return -EINVAL;
+ }
+
+ layer->dma[i].xt.numf = height;
+ width_bytes = drm_format_plane_width_bytes(info, i, width);
+ layer->dma[i].sgl[0].size = width_bytes;
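+ /* icg (inter-chunk gap) is the padding between lines: pitch - line size */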
+ layer->dma[i].sgl[0].icg = fb->pitches[i] -
+ layer->dma[i].sgl[0].size;
+ layer->dma[i].xt.src_start = paddr;
+ layer->dma[i].xt.frame_size = 1;
+ layer->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ layer->dma[i].xt.src_sgl = true;
+ layer->dma[i].xt.dst_sgl = false;
+ layer->dma[i].is_active = true;
+ }
+
+ for (; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+ layer->dma[i].is_active = false;
+
+ ret = zynqmp_disp_layer_set_fmt(layer->disp, layer, info->format);
+ if (ret)
+ dev_err(dev, "failed to set dp_sub layer fmt\n");
+
+ return ret;
+}
+
+static void zynqmp_disp_plane_destroy(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+
+ xlnx_bridge_unregister(&layer->bridge);
+ drm_plane_cleanup(plane);
+}
+
+static int
+zynqmp_disp_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret = 0;
+
+ if (property == disp->g_alpha_prop)
+ zynqmp_disp_set_alpha(disp, val);
+ else if (property == disp->g_alpha_en_prop)
+ zynqmp_disp_set_g_alpha(disp, val);
+ else if (property == disp->tpg_prop)
+ ret = zynqmp_disp_layer_set_tpg(disp, layer, val);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+static int
+zynqmp_disp_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret = 0;
+
+ if (property == disp->g_alpha_prop)
+ *val = zynqmp_disp_get_alpha(disp);
+ else if (property == disp->g_alpha_en_prop)
+ *val = zynqmp_disp_get_g_alpha(disp);
+ else if (property == disp->tpg_prop)
+ *val = zynqmp_disp_layer_get_tpg(disp, layer);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+static int
+zynqmp_disp_plane_atomic_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
+ /* Do async-update if possible */
+ state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+ ret = drm_atomic_commit(state);
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+static const struct drm_plane_funcs zynqmp_disp_plane_funcs = {
+ .update_plane = zynqmp_disp_plane_atomic_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .atomic_set_property = zynqmp_disp_plane_atomic_set_property,
+ .atomic_get_property = zynqmp_disp_plane_atomic_get_property,
+ .destroy = zynqmp_disp_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void
+zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ int ret;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ if (plane->state->fb == old_state->fb)
+ return;
+
+ if (old_state->fb &&
+ old_state->fb->format->format != plane->state->fb->format->format)
+ zynqmp_disp_plane_disable(plane);
+
+ ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret)
+ return;
+
+ zynqmp_disp_plane_enable(plane);
+}
+
+static void
+zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ zynqmp_disp_plane_disable(plane);
+}
+
+static int zynqmp_disp_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void
+zynqmp_disp_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ int ret;
+
+ if (plane->state->fb == new_state->fb)
+ return;
+
+ if (plane->state->fb &&
+ plane->state->fb->format->format != new_state->fb->format->format)
+ zynqmp_disp_plane_disable(plane);
+
+ /* Update the current state with new configurations */
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+ plane->state->crtc = new_state->crtc;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->state = new_state->state;
+
+ ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
+ if (ret)
+ return;
+
+ zynqmp_disp_plane_enable(plane);
+}
+
+static const struct drm_plane_helper_funcs zynqmp_disp_plane_helper_funcs = {
+ .atomic_update = zynqmp_disp_plane_atomic_update,
+ .atomic_disable = zynqmp_disp_plane_atomic_disable,
+ .atomic_async_check = zynqmp_disp_plane_atomic_async_check,
+ .atomic_async_update = zynqmp_disp_plane_atomic_async_update,
+};
+
+static int zynqmp_disp_create_plane(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+ u32 *fmts = NULL;
+ unsigned int num_fmts = 0;
+ enum drm_plane_type type;
+ int ret;
+
+ /* The graphics layer is primary, and the video layer is overlay */
+ type = DRM_PLANE_TYPE_OVERLAY;
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ layer = &disp->layers[i];
+ zynqmp_disp_layer_get_fmts(disp, layer, &fmts, &num_fmts);
+ ret = drm_universal_plane_init(disp->drm, &layer->plane, 0,
+ &zynqmp_disp_plane_funcs, fmts,
+ num_fmts, NULL, type, NULL);
+ if (ret)
+ goto err_plane;
+ drm_plane_helper_add(&layer->plane,
+ &zynqmp_disp_plane_helper_funcs);
+ type = DRM_PLANE_TYPE_PRIMARY;
+ }
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ layer = &disp->layers[i];
+ layer->bridge.enable = &zynqmp_disp_bridge_enable;
+ layer->bridge.disable = &zynqmp_disp_bridge_disable;
+ layer->bridge.set_input = &zynqmp_disp_bridge_set_input;
+ layer->bridge.get_input_fmts =
+ &zynqmp_disp_bridge_get_input_fmts;
+ layer->bridge.of_node = layer->of_node;
+ xlnx_bridge_register(&layer->bridge);
+ }
+
+ /* Attach the global alpha properties to the last (graphics) plane */
+ drm_object_attach_property(&layer->plane.base, disp->g_alpha_prop,
+ ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX);
+ disp->alpha = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+ /* Enable the global alpha as default */
+ drm_object_attach_property(&layer->plane.base, disp->g_alpha_en_prop,
+ true);
+ disp->alpha_en = true;
+
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ drm_object_attach_property(&layer->plane.base, disp->tpg_prop, false);
+
+ return ret;
+
+err_plane:
+ if (i)
+ drm_plane_cleanup(&disp->layers[0].plane);
+ return ret;
+}
+
+static void zynqmp_disp_destroy_plane(struct zynqmp_disp *disp)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ zynqmp_disp_plane_destroy(&disp->layers[i].plane);
+}
+
+/*
+ * Xlnx crtc functions
+ */
+
+static inline struct zynqmp_disp *xlnx_crtc_to_disp(struct xlnx_crtc *xlnx_crtc)
+{
+ return container_of(xlnx_crtc, struct zynqmp_disp, xlnx_crtc);
+}
+
+static int zynqmp_disp_get_max_width(struct xlnx_crtc *xlnx_crtc)
+{
+ return ZYNQMP_DISP_MAX_WIDTH;
+}
+
+static int zynqmp_disp_get_max_height(struct xlnx_crtc *xlnx_crtc)
+{
+ return ZYNQMP_DISP_MAX_HEIGHT;
+}
+
+static uint32_t zynqmp_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+ struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+
+ return disp->layers[ZYNQMP_DISP_LAYER_GFX].fmt->drm_fmt;
+}
+
+static unsigned int zynqmp_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+ struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+ struct zynqmp_disp_layer *layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+
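+ /* copy_align is log2 of the DMA alignment, per the dmaengine convention */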
+ return 1 << layer->dma->chan->device->copy_align;
+}
+
+static u64 zynqmp_disp_get_dma_mask(struct xlnx_crtc *xlnx_crtc)
+{
+ return DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT);
+}
+
+/*
+ * DRM crtc functions
+ */
+
+static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
+{
+ struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+
+ return xlnx_crtc_to_disp(xlnx_crtc);
+}
+
+static int zynqmp_disp_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+ unsigned long rate;
+ long diff;
+ int ret;
+
+ zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+ ret = clk_set_rate(disp->pclk, adjusted_mode->clock * 1000);
+ if (ret) {
+ dev_err(disp->dev, "failed to set a pixel clock\n");
+ return ret;
+ }
+
+ rate = clk_get_rate(disp->pclk);
+ diff = rate - adjusted_mode->clock * 1000;
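+ /* Report loudly if the actual rate deviates more than 5% (1/20) */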
+ if (abs(diff) > (adjusted_mode->clock * 1000) / 20) {
+ dev_info(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+ adjusted_mode->clock, rate);
+ } else {
+ dev_dbg(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+ adjusted_mode->clock, rate);
+ }
+
+ /* The timing registers should always be programmed */
+ zynqmp_dp_encoder_mode_set_stream(disp->dpsub->dp, adjusted_mode);
+
+ return 0;
+}
+
+static void
+zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+ int ret, vrefresh;
+
+ zynqmp_disp_crtc_mode_set(crtc, &crtc->state->mode,
+ adjusted_mode, crtc->x, crtc->y, NULL);
+
+ pm_runtime_get_sync(disp->dev);
+ ret = zynqmp_disp_clk_enable(disp->pclk, &disp->pclk_en);
+ if (ret) {
+ dev_err(disp->dev, "failed to enable a pixel clock\n");
+ return;
+ }
+ zynqmp_disp_set_output_fmt(disp, disp->color);
+ zynqmp_disp_set_bg_color(disp, disp->bg_c0, disp->bg_c1, disp->bg_c2);
+ zynqmp_disp_enable(disp);
+ /* Delay of 3 vblank intervals for timing gen to be stable */
+ vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+ msleep(3 * 1000 / vrefresh);
+}
+
+static void
+zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+ zynqmp_disp_plane_disable(crtc->primary);
+ zynqmp_disp_disable(disp, true);
+ drm_crtc_vblank_off(crtc);
+ pm_runtime_put_sync(disp->dev);
+}
+
+static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+static void
+zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ drm_crtc_vblank_on(crtc);
+ /* Don't rely on vblank when disabling crtc */
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ /* Consume the flip_done event from atomic helper */
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static const struct drm_crtc_helper_funcs zynqmp_disp_crtc_helper_funcs = {
+ .atomic_enable = zynqmp_disp_crtc_atomic_enable,
+ .atomic_disable = zynqmp_disp_crtc_atomic_disable,
+ .atomic_check = zynqmp_disp_crtc_atomic_check,
+ .atomic_begin = zynqmp_disp_crtc_atomic_begin,
+};
+
+static void zynqmp_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+ zynqmp_disp_crtc_atomic_disable(crtc, NULL);
+ drm_crtc_cleanup(crtc);
+}
+
+static int zynqmp_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ zynqmp_dp_enable_vblank(disp->dpsub->dp);
+
+ return 0;
+}
+
+static void zynqmp_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ zynqmp_dp_disable_vblank(disp->dpsub->dp);
+}
+
+static int
+zynqmp_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ /*
+ * CRTC prop values are just stored here and applied when CRTC gets
+ * enabled
+ */
+ if (property == disp->color_prop)
+ disp->color = val;
+ else if (property == disp->bg_c0_prop)
+ disp->bg_c0 = val;
+ else if (property == disp->bg_c1_prop)
+ disp->bg_c1 = val;
+ else if (property == disp->bg_c2_prop)
+ disp->bg_c2 = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+zynqmp_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+ if (property == disp->color_prop)
+ *val = disp->color;
+ else if (property == disp->bg_c0_prop)
+ *val = disp->bg_c0;
+ else if (property == disp->bg_c1_prop)
+ *val = disp->bg_c1;
+ else if (property == disp->bg_c2_prop)
+ *val = disp->bg_c2;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs zynqmp_disp_crtc_funcs = {
+ .destroy = zynqmp_disp_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_set_property = zynqmp_disp_crtc_atomic_set_property,
+ .atomic_get_property = zynqmp_disp_crtc_atomic_get_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = zynqmp_disp_crtc_enable_vblank,
+ .disable_vblank = zynqmp_disp_crtc_disable_vblank,
+};
+
+static void zynqmp_disp_create_crtc(struct zynqmp_disp *disp)
+{
+ struct drm_plane *plane = &disp->layers[ZYNQMP_DISP_LAYER_GFX].plane;
+ struct drm_mode_object *obj = &disp->xlnx_crtc.crtc.base;
+ int ret;
+
+ ret = drm_crtc_init_with_planes(disp->drm, &disp->xlnx_crtc.crtc, plane,
+ NULL, &zynqmp_disp_crtc_funcs, NULL);
+ drm_crtc_helper_add(&disp->xlnx_crtc.crtc,
+ &zynqmp_disp_crtc_helper_funcs);
+ drm_object_attach_property(obj, disp->color_prop, 0);
+ zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[0].name);
+ drm_object_attach_property(obj, disp->bg_c0_prop, 0);
+ drm_object_attach_property(obj, disp->bg_c1_prop, 0);
+ drm_object_attach_property(obj, disp->bg_c2_prop, 0);
+
+ disp->xlnx_crtc.get_max_width = &zynqmp_disp_get_max_width;
+ disp->xlnx_crtc.get_max_height = &zynqmp_disp_get_max_height;
+ disp->xlnx_crtc.get_format = &zynqmp_disp_get_format;
+ disp->xlnx_crtc.get_align = &zynqmp_disp_get_align;
+ disp->xlnx_crtc.get_dma_mask = &zynqmp_disp_get_dma_mask;
+ xlnx_crtc_register(disp->drm, &disp->xlnx_crtc);
+}
+
+static void zynqmp_disp_destroy_crtc(struct zynqmp_disp *disp)
+{
+ xlnx_crtc_unregister(disp->drm, &disp->xlnx_crtc);
+ zynqmp_disp_crtc_destroy(&disp->xlnx_crtc.crtc);
+}
+
+static void zynqmp_disp_map_crtc_to_plane(struct zynqmp_disp *disp)
+{
+ u32 possible_crtcs = drm_crtc_mask(&disp->xlnx_crtc.crtc);
+ unsigned int i;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+ disp->layers[i].plane.possible_crtcs = possible_crtcs;
+}
+
+/*
+ * Component functions
+ */
+
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_disp *disp = dpsub->disp;
+ struct drm_device *drm = data;
+ int num;
+ u64 max;
+ int ret;
+
+ disp->drm = drm;
+
+ max = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+ disp->g_alpha_prop = drm_property_create_range(drm, 0, "alpha", 0, max);
+ disp->g_alpha_en_prop = drm_property_create_bool(drm, 0,
+ "g_alpha_en");
+ num = ARRAY_SIZE(zynqmp_disp_color_enum);
+ disp->color_prop = drm_property_create_enum(drm, 0,
+ "output_color",
+ zynqmp_disp_color_enum,
+ num);
+ max = ZYNQMP_DISP_V_BLEND_BG_MAX;
+ disp->bg_c0_prop = drm_property_create_range(drm, 0, "bg_c0", 0, max);
+ disp->bg_c1_prop = drm_property_create_range(drm, 0, "bg_c1", 0, max);
+ disp->bg_c2_prop = drm_property_create_range(drm, 0, "bg_c2", 0, max);
+ disp->tpg_prop = drm_property_create_bool(drm, 0, "tpg");
+
+ ret = zynqmp_disp_create_plane(disp);
+ if (ret)
+ return ret;
+ zynqmp_disp_create_crtc(disp);
+ zynqmp_disp_map_crtc_to_plane(disp);
+
+ return 0;
+}
+
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_disp *disp = dpsub->disp;
+
+ zynqmp_disp_destroy_crtc(disp);
+ zynqmp_disp_destroy_plane(disp);
+ drm_property_destroy(disp->drm, disp->bg_c2_prop);
+ drm_property_destroy(disp->drm, disp->bg_c1_prop);
+ drm_property_destroy(disp->drm, disp->bg_c0_prop);
+ drm_property_destroy(disp->drm, disp->color_prop);
+ drm_property_destroy(disp->drm, disp->g_alpha_en_prop);
+ drm_property_destroy(disp->drm, disp->g_alpha_prop);
+}
+
+/*
+ * Platform initialization functions
+ */
+
+static int zynqmp_disp_enumerate_fmts(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ u32 *bus_fmts;
+ u32 i, size, num_bus_fmts;
+ u32 gfx_fmt = ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565;
+
+ num_bus_fmts = ARRAY_SIZE(av_buf_live_fmts);
+ bus_fmts = devm_kzalloc(disp->dev, sizeof(*bus_fmts) * num_bus_fmts,
+ GFP_KERNEL);
+ if (!bus_fmts)
+ return -ENOMEM;
+ for (i = 0; i < num_bus_fmts; i++)
+ bus_fmts[i] = av_buf_live_fmts[i].bus_fmt;
+
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ layer->num_bus_fmts = num_bus_fmts;
+ layer->bus_fmts = bus_fmts;
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kzalloc(disp->dev,
+ sizeof(*layer->drm_fmts) * size,
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+ for (i = 0; i < layer->num_fmts; i++)
+ layer->drm_fmts[i] = av_buf_vid_fmts[i].drm_fmt;
+ layer->fmt = &av_buf_vid_fmts[ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV];
+
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_GFX];
+ layer->num_bus_fmts = num_bus_fmts;
+ layer->bus_fmts = bus_fmts;
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ layer->num_fmts = size;
+ layer->drm_fmts = devm_kzalloc(disp->dev,
+ sizeof(*layer->drm_fmts) * size,
+ GFP_KERNEL);
+ if (!layer->drm_fmts)
+ return -ENOMEM;
+
+ for (i = 0; i < layer->num_fmts; i++)
+ layer->drm_fmts[i] = av_buf_gfx_fmts[i].drm_fmt;
+ if (zynqmp_disp_gfx_init_fmt < ARRAY_SIZE(zynqmp_disp_gfx_init_fmts))
+ gfx_fmt = zynqmp_disp_gfx_init_fmts[zynqmp_disp_gfx_init_fmt];
+ layer->fmt = &av_buf_gfx_fmts[gfx_fmt];
+
+ return 0;
+}
+
+int zynqmp_disp_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ struct zynqmp_disp *disp;
+ struct resource *res;
+ int ret;
+
+ disp = devm_kzalloc(&pdev->dev, sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+ disp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+ disp->blend.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(disp->blend.base))
+ return PTR_ERR(disp->blend.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+ disp->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(disp->av_buf.base))
+ return PTR_ERR(disp->av_buf.base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+ disp->aud.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(disp->aud.base))
+ return PTR_ERR(disp->aud.base);
+
+ dpsub = platform_get_drvdata(pdev);
+ dpsub->disp = disp;
+ disp->dpsub = dpsub;
+
+ ret = zynqmp_disp_enumerate_fmts(disp);
+ if (ret)
+ return ret;
+
+ /* Try the live PL video clock */
+ disp->_pl_pclk = devm_clk_get(disp->dev, "dp_live_video_in_clk");
+ if (!IS_ERR(disp->_pl_pclk)) {
+ disp->pclk = disp->_pl_pclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+ &disp->pclk_en);
+ if (ret)
+ disp->pclk = NULL;
+ } else if (PTR_ERR(disp->_pl_pclk) == -EPROBE_DEFER) {
+ return PTR_ERR(disp->_pl_pclk);
+ }
+
+ /* If the live PL video clock is not valid, fall back to PS clock */
+ if (!disp->pclk) {
+ disp->_ps_pclk = devm_clk_get(disp->dev, "dp_vtc_pixel_clk_in");
+ if (IS_ERR(disp->_ps_pclk)) {
+ dev_err(disp->dev, "failed to init any video clock\n");
+ return PTR_ERR(disp->_ps_pclk);
+ }
+ disp->pclk = disp->_ps_pclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+ &disp->pclk_en);
+ if (ret) {
+ dev_err(disp->dev, "failed to init any video clock\n");
+ return ret;
+ }
+ }
+
+ disp->aclk = devm_clk_get(disp->dev, "dp_apb_clk");
+ if (IS_ERR(disp->aclk))
+ return PTR_ERR(disp->aclk);
+ ret = zynqmp_disp_clk_enable(disp->aclk, &disp->aclk_en);
+ if (ret) {
+ dev_err(disp->dev, "failed to enable the APB clk\n");
+ return ret;
+ }
+
+ /* Try the live PL audio clock */
+ disp->_pl_audclk = devm_clk_get(disp->dev, "dp_live_audio_aclk");
+ if (!IS_ERR(disp->_pl_audclk)) {
+ disp->audclk = disp->_pl_audclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+ &disp->audclk_en);
+ if (ret)
+ disp->audclk = NULL;
+ }
+
+ /* If the live PL audio clock is not valid, fall back to PS clock */
+ if (!disp->audclk) {
+ disp->_ps_audclk = devm_clk_get(disp->dev, "dp_aud_clk");
+ if (!IS_ERR(disp->_ps_audclk)) {
+ disp->audclk = disp->_ps_audclk;
+ ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+ &disp->audclk_en);
+ if (ret)
+ disp->audclk = NULL;
+ }
+
+ if (!disp->audclk) {
+ dev_err(disp->dev,
+ "audio is disabled due to clock failure\n");
+ }
+ }
+
+ ret = zynqmp_disp_layer_create(disp);
+ if (ret)
+ goto error_aclk;
+
+ zynqmp_disp_init(disp);
+
+ return 0;
+
+error_aclk:
+ zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+ return ret;
+}
+
+int zynqmp_disp_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ struct zynqmp_disp *disp = dpsub->disp;
+
+ zynqmp_disp_layer_destroy(disp);
+ if (disp->audclk)
+ zynqmp_disp_clk_disable(disp->audclk, &disp->audclk_en);
+ zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+ zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+ dpsub->disp = NULL;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
new file mode 100644
index 000000000000..28d8188f8f5e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP Display Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DISP_H_
+#define _ZYNQMP_DISP_H_
+
+struct zynqmp_disp;
+
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp);
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp);
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp);
+
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_disp_probe(struct platform_device *pdev);
+int zynqmp_disp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DISP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
new file mode 100644
index 000000000000..c3c86dacac97
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -0,0 +1,1917 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dpsub.h"
+
+static uint zynqmp_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, zynqmp_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
+
+/*
+ * Some sinks require a delay after a power-on request
+ */
+static uint zynqmp_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
+MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
+
+/* Link configuration registers */
+#define ZYNQMP_DP_TX_LINK_BW_SET 0x0
+#define ZYNQMP_DP_TX_LANE_CNT_SET 0x4
+#define ZYNQMP_DP_TX_ENHANCED_FRAME_EN 0x8
+#define ZYNQMP_DP_TX_TRAINING_PATTERN_SET 0xc
+#define ZYNQMP_DP_TX_SCRAMBLING_DISABLE 0x14
+#define ZYNQMP_DP_TX_DOWNSPREAD_CTL 0x18
+#define ZYNQMP_DP_TX_SW_RESET 0x1c
+#define ZYNQMP_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define ZYNQMP_DP_TX_SW_RESET_AUX BIT(7)
+#define ZYNQMP_DP_TX_SW_RESET_ALL (ZYNQMP_DP_TX_SW_RESET_STREAM1 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM2 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM3 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM4 | \
+ ZYNQMP_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define ZYNQMP_DP_TX_ENABLE 0x80
+#define ZYNQMP_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define ZYNQMP_DP_TX_VERSION 0xf8
+#define ZYNQMP_DP_TX_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_VERSION_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_VERSION_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_VERSION_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_VERSION_REVISION_MASK GENMASK(15, 12)
+#define ZYNQMP_DP_TX_VERSION_REVISION_SHIFT 12
+#define ZYNQMP_DP_TX_VERSION_PATCH_MASK GENMASK(11, 8)
+#define ZYNQMP_DP_TX_VERSION_PATCH_SHIFT 8
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_MASK GENMASK(7, 0)
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define ZYNQMP_DP_TX_CORE_ID 0xfc
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_MASK GENMASK(15, 8)
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_SHIFT 8
+#define ZYNQMP_DP_TX_CORE_ID_DIRECTION GENMASK(1, 0)
+
+/* AUX channel interface registers */
+#define ZYNQMP_DP_TX_AUX_COMMAND 0x100
+#define ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define ZYNQMP_DP_TX_AUX_WRITE_FIFO 0x104
+#define ZYNQMP_DP_TX_AUX_ADDRESS 0x108
+#define ZYNQMP_DP_TX_CLK_DIVIDER 0x10c
+#define ZYNQMP_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE 0x130
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_DATA 0x134
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE 0x138
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT 0x13c
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define ZYNQMP_DP_TX_INTR_STATUS 0x140
+#define ZYNQMP_DP_TX_INTR_MASK 0x144
+#define ZYNQMP_DP_TX_INTR_HPD_IRQ BIT(0)
+#define ZYNQMP_DP_TX_INTR_HPD_EVENT BIT(1)
+#define ZYNQMP_DP_TX_INTR_REPLY_RECV BIT(2)
+#define ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_INTR_HPD_PULSE BIT(4)
+#define ZYNQMP_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define ZYNQMP_DP_TX_INTR_VBLANK_START BIT(13)
+#define ZYNQMP_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define ZYNQMP_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define ZYNQMP_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define ZYNQMP_DP_TX_INTR_CUST_TS BIT(29)
+#define ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define ZYNQMP_DP_TX_INTR_VSYNC_TS BIT(31)
+#define ZYNQMP_DP_TX_INTR_ALL (ZYNQMP_DP_TX_INTR_HPD_IRQ | \
+ ZYNQMP_DP_TX_INTR_HPD_EVENT | \
+ ZYNQMP_DP_TX_INTR_REPLY_RECV | \
+ ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT | \
+ ZYNQMP_DP_TX_INTR_HPD_PULSE | \
+ ZYNQMP_DP_TX_INTR_EXT_PKT_TXD | \
+ ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define ZYNQMP_DP_TX_NO_INTR_ALL (ZYNQMP_DP_TX_INTR_PIXEL0_MATCH | \
+ ZYNQMP_DP_TX_INTR_PIXEL1_MATCH | \
+ ZYNQMP_DP_TX_INTR_CUST_TS_2 | \
+ ZYNQMP_DP_TX_INTR_CUST_TS | \
+ ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS | \
+ ZYNQMP_DP_TX_INTR_VSYNC_TS)
+#define ZYNQMP_DP_TX_REPLY_DATA_CNT 0x148
+#define ZYNQMP_DP_SUB_TX_INTR_STATUS 0x3a0
+#define ZYNQMP_DP_SUB_TX_INTR_MASK 0x3a4
+#define ZYNQMP_DP_SUB_TX_INTR_EN 0x3a8
+#define ZYNQMP_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define ZYNQMP_DP_TX_MAIN_STREAM_HRES 0x194
+#define ZYNQMP_DP_TX_MAIN_STREAM_VRES 0x198
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define ZYNQMP_DP_TX_M_VID 0x1ac
+#define ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define ZYNQMP_DP_TX_N_VID 0x1b4
+#define ZYNQMP_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define ZYNQMP_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define ZYNQMP_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define ZYNQMP_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define ZYNQMP_DP_TX_PHY_CONFIG 0x200
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET (ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN 0x238
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_STATUS 0x280
+#define ZYNQMP_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define ZYNQMP_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define ZYNQMP_DP_TX_AUDIO_CONTROL 0x300
+#define ZYNQMP_DP_TX_AUDIO_CHANNELS 0x304
+#define ZYNQMP_DP_TX_AUDIO_INFO_DATA 0x308
+#define ZYNQMP_DP_TX_AUDIO_M_AUD 0x328
+#define ZYNQMP_DP_TX_AUDIO_N_AUD 0x32c
+#define ZYNQMP_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define ZYNQMP_DP_MISC0_RGB (0)
+#define ZYNQMP_DP_MISC0_YCRCB_422 (5 << 1)
+#define ZYNQMP_DP_MISC0_YCRCB_444 (6 << 1)
+#define ZYNQMP_DP_MISC0_FORMAT_MASK 0xe
+#define ZYNQMP_DP_MISC0_BPC_6 (0 << 5)
+#define ZYNQMP_DP_MISC0_BPC_8 (1 << 5)
+#define ZYNQMP_DP_MISC0_BPC_10 (2 << 5)
+#define ZYNQMP_DP_MISC0_BPC_12 (3 << 5)
+#define ZYNQMP_DP_MISC0_BPC_16 (4 << 5)
+#define ZYNQMP_DP_MISC0_BPC_MASK 0xe0
+#define ZYNQMP_DP_MISC1_Y_ONLY (1 << 7)
+
+#define ZYNQMP_DP_MAX_LANES 2
+#define ZYNQMP_MAX_FREQ 3000000
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_V1_2 0x12
+
+/**
+ * struct zynqmp_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct zynqmp_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct zynqmp_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth (link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ * @fmt: format identifier string
+ */
+struct zynqmp_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+ const char *fmt;
+};
+
+/**
+ * struct zynqmp_dp_config - Configuration of DisplayPort from DTS
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ * @bpc: bits per component
+ * @num_colors: number of color components
+ */
+struct zynqmp_dp_config {
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+ u8 bpc;
+ u8 num_colors;
+};
+
+/**
+ * struct zynqmp_dp - Xilinx DisplayPort core
+ * @encoder: the drm encoder structure
+ * @connector: the drm connector structure
+ * @sync_prop: synchronous mode property
+ * @bpc_prop: bpc mode property
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @iomem: device I/O memory for register access
+ * @irq: irq
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @phy: PHY handles for DP lanes
+ * @num_lanes: number of enabled phy lanes
+ * @hpd_work: hot plug detection worker
+ * @status: connection status
+ * @enabled: flag to indicate if the device is enabled
+ * @dpms: current dpms state
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct zynqmp_dp {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct drm_property *sync_prop;
+ struct drm_property *bpc_prop;
+ struct device *dev;
+ struct zynqmp_dpsub *dpsub;
+ struct drm_device *drm;
+ void __iomem *iomem;
+ int irq;
+
+ struct zynqmp_dp_config config;
+ struct drm_dp_aux aux;
+ struct phy *phy[ZYNQMP_DP_MAX_LANES];
+ u8 num_lanes;
+ struct delayed_work hpd_work;
+ enum drm_connector_status status;
+ bool enabled;
+
+ int dpms;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct zynqmp_dp_link_config link_config;
+ struct zynqmp_dp_mode mode;
+ u8 train_set[ZYNQMP_DP_MAX_LANES];
+};
+
+static inline struct zynqmp_dp *encoder_to_dp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct zynqmp_dp, encoder);
+}
+
+static inline struct zynqmp_dp *connector_to_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct zynqmp_dp, connector);
+}
+
+static void zynqmp_dp_write(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 zynqmp_dp_read(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static void zynqmp_dp_clr(void __iomem *base, int offset, u32 clr)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) & ~clr);
+}
+
+static void zynqmp_dp_set(void __iomem *base, int offset, u32 set)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) | set);
+}
+
+/*
+ * Internal functions: used by zynqmp_disp.c
+ */
+
+/**
+ * zynqmp_dp_update_bpp - Update the current bpp config
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the current bpp based on the color format: bpc & num_colors.
+ * Any function that changes bpc or num_colors should call this
+ * to keep the bpp value in sync.
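+ * For example, 8 bpc with 3 color components (RGB) gives bpp = 24.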
+ */
+static void zynqmp_dp_update_bpp(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->bpp = dp->config.bpc * dp->config.num_colors;
+}
+
+/**
+ * zynqmp_dp_set_color - Set the color
+ * @dp: DisplayPort IP core structure
+ * @color: color string, from zynqmp_disp_color_enum
+ *
+ * Update misc register values based on @color string.
+ *
+ * Return: 0 on success, or -EINVAL.
+ */
+int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~ZYNQMP_DP_MISC1_Y_ONLY;
+ if (strcmp(color, "rgb") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_RGB;
+ config->num_colors = 3;
+ } else if (strcmp(color, "ycrcb422") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_422;
+ config->num_colors = 2;
+ } else if (strcmp(color, "ycrcb444") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_444;
+ config->num_colors = 3;
+ } else if (strcmp(color, "yonly") == 0) {
+ config->misc1 |= ZYNQMP_DP_MISC1_Y_ONLY;
+ config->num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colormetry in DT\n");
+ return -EINVAL;
+ }
+ zynqmp_dp_update_bpp(dp);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_enable_vblank - Enable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Enable vblank interrupt
+ */
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/**
+ * zynqmp_dp_disable_vblank - Disable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Disable vblank interrupt
+ */
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/*
+ * DP PHY functions
+ */
+
+/**
+ * zynqmp_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned by the failing phy function.
+ * Note: this function may be called with no phy lane assigned to DP.
+ */
+static int zynqmp_dp_init_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+ /* Wait for PLL to be locked for the primary (1st) lane */
+ if (dp->phy[0]) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_clr(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Exit the phy.
+ */
+static void zynqmp_dp_exit_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_exit(dp->phy[i]);
+ if (ret)
+ dev_err(dp->dev, "failed to exit phy(%d) %d\n", i, ret);
+ }
+}
+
+/**
+ * zynqmp_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if the PHY is ready, polling every 1 ms for up to 100 attempts.
+ * This amount of delay was suggested by the IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int zynqmp_dp_phy_ready(struct zynqmp_dp *dp)
+{
+ u32 i, reg, ready;
+
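+ /* One ready bit per lane, e.g. a 2-lane link yields a mask of 0x3 */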
+ ready = (1 << dp->num_lanes) - 1;
+
+ /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ dev_err(dp->dev, "PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ return 0;
+}
+
+/*
+ * Power Management functions
+ */
+/**
+ * zynqmp_dp_pm_resume - Resume DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Resume the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_resume(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_init_phy(dp);
+}
+/**
+ * zynqmp_dp_pm_suspend - Suspend DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Suspend the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_suspend(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_exit_phy(dp);
+}
+/*
+ * DP functions
+ */
+
+/**
+ * zynqmp_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (in kilobytes per second)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
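+ * For example, 2 lanes at 540000 (HBR2) with 24 bpp gives
+ * 540000 * 2 * 8 / 24 = 360000 KHz.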
+ */
+static inline int zynqmp_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
+ return link_rate * lane_num * 8 / bpp;
+}
+
+/**
+ * zynqmp_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values (rate and lane count) for the requested
+ * pixel clock @pclock. The @pclock is stored in the mode for later use by
+ * other functions. The selected rate is downshifted from the current rate
+ * @current_bw.
+ *
+ * Return: the new link rate code, or -EINVAL on failure.
+ */
+static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ if (current_bw == DP_LINK_BW_1_62) {
+ dev_err(dp->dev, "can't downshift. already lowest link rate\n");
+ return -EINVAL;
+ }
+
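+ /* Pick the highest supported link rate, strictly below @current_bw when downshifting */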
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
+
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_err(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
+
+/**
+ * zynqmp_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 i;
+
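+ /* Adopt the highest voltage swing and pre-emphasis requested on any lane */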
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ if (preemphasis >= DP_TRAIN_PRE_EMPH_LEVEL_2)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * zynqmp_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from the sink. The mapped
+ * values are predefined, and the values (vs, pe, pc) come from the device
+ * manual.
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
+{
+ u8 *train_set = dp->train_set;
+ u8 i, v_level, p_level;
+ int ret;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+ dp->mode.lane_cnt);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+ v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+ xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+ zynqmp_dp_write(dp->iomem, reg, 0x2);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * With 4 lanes and 4 possible values, 256 iterations is the theoretical
+ * maximum, so this loop should exit within 512 iterations.
+ */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+ if (i == lane_cnt)
+ break;
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization training completes successfully, or
+ * a corresponding error code.
+ */
+static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ if (dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all training stages complete successfully, or a
+ * corresponding error code.
+ */
+static int zynqmp_dp_train(struct zynqmp_dp *dp)
+{
+ u32 reg;
+ u8 bw_code = dp->mode.bw_code;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 aux_lane_cnt = lane_cnt;
+ bool enhanced;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LANE_CNT_SET, lane_cnt);
+ enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+ if (enhanced) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENHANCED_FRAME_EN, 1);
+ aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ }
+
+ if (dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 1);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ } else {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set lane count\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+ DP_SET_ANSI_8B10B);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set ANSI 8B/10B encoding\n");
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to set DP bandwidth\n");
+ return ret;
+ }
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LINK_BW_SET, bw_code);
+ switch (bw_code) {
+ case DP_LINK_BW_1_62:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+ break;
+ case DP_LINK_BW_2_7:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+ break;
+ case DP_LINK_BW_5_4:
+ default:
+ reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+ break;
+ }
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+ reg);
+ ret = zynqmp_dp_phy_ready(dp);
+ if (ret < 0)
+ return ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 1);
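+ /* Clear the per-lane training bytes (DP supports at most 4 lanes) */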
+ memset(dp->train_set, 0, 4);
+ ret = zynqmp_dp_link_train_cr(dp);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_dp_link_train_ce(dp);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to disable training pattern\n");
+ return ret;
+ }
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 0);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void zynqmp_dp_train_loop(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
+ do {
+ if (dp->status == connector_status_disconnected ||
+ !dp->enabled)
+ return;
+
+ ret = zynqmp_dp_train(dp);
+ if (!ret)
+ return;
+
+ ret = zynqmp_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ goto err_out;
+
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+err_out:
+ dev_err(dp->dev, "failed to train the DP link\n");
+}
+
+/*
+ * DP Aux functions
+ */
+
+#define AUX_READ_BIT 0x1
+
+/**
+ * zynqmp_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux-related commands, native or i2c aux
+ * read/write, are submitted through this function, which is mapped to the
+ * transfer function of struct drm_dp_aux. The function involves multiple
+ * register reads and writes, so synchronization is required; it is provided
+ * by the drm_dp_helper core via @hw_mutex. The calling thread sleeps if
+ * there is no immediate reply to the command submission. The reply code is
+ * returned in @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
+static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
+ u8 *buf, u8 bytes, u8 *reply)
+{
+ bool is_read = cmd & AUX_READ_BIT;
+ void __iomem *iomem = dp->iomem;
+ u32 reg, i;
+
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST)
+ return -EBUSY;
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_ADDRESS, addr);
+ if (!is_read)
+ for (i = 0; i < bytes; i++)
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_WRITE_FIFO,
+ buf[i]);
+
+ reg = cmd << ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT;
+ if (!buf || !bytes)
+ reg |= ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
+ else
+ reg |= (bytes - 1) << ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT;
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_COMMAND, reg);
+
+ /* Wait up to 2 ms for the reply to be delivered */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY)
+ break;
+
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
+ i == 2)
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1100);
+ }
+
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_AUX_REPLY_CODE);
+ if (reply)
+ *reply = reg;
+
+ if (is_read &&
+ (reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
+ reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_REPLY_DATA_CNT);
+ if ((reg & ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
+ return -EIO;
+
+ for (i = 0; i < bytes; i++) {
+ buf[i] = zynqmp_dp_read(iomem,
+ ZYNQMP_DP_TX_AUX_REPLY_DATA);
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t
+zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct zynqmp_dp *dp = container_of(aux, struct zynqmp_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = zynqmp_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
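+ /* For instance, a 50 ms timeout gives 50000 / 400 = 125 attempts */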
+
+ for (i = 0; i < iter; i++) {
+ ret = zynqmp_dp_aux_cmd_submit(dp, msg->request, msg->address,
+ msg->buffer, msg->size,
+ &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no connected aux device\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the axi clock, so
+ * this function gets the axi clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
+ *
+ * Return: 0 on success, error value otherwise
+ */
+static int zynqmp_dp_init_aux(struct zynqmp_dp *dp)
+{
+ unsigned int rate;
+ u32 reg, w;
+
+ rate = zynqmp_disp_get_apb_clk_rate(dp->dpsub->disp);
+ if (rate < ZYNQMP_DP_TX_CLK_DIVIDER_MHZ) {
+ dev_err(dp->dev, "aclk should be higher than 1MHz\n");
+ return -EINVAL;
+ }
+
+ /* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
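+ /*
+ * Example: with a 100 MHz aclk, 0.4 usec is 40 cycles and 0.6 usec is
+ * 60 cycles, so the loop below settles on w = 40.
+ */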
+ for (w = 8; w <= 48; w += 8) {
+ /* AUX pulse width should be between 0.4 to 0.6 usec */
+ if (w >= (4 * rate / 10000000) &&
+ w <= (6 * rate / 10000000))
+ break;
+ }
+
+ if (w > 48) {
+ dev_err(dp->dev, "aclk frequency too high\n");
+ return -EINVAL;
+ }
+ reg = w << ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
+ reg |= rate / ZYNQMP_DP_TX_CLK_DIVIDER_MHZ;
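+ /* e.g. a 100 MHz aclk programs a clock divider of 100 */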
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_CLK_DIVIDER, reg);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_NO_INTR_ALL);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 1);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_aux - De-initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * De-initialize the DP aux. Disable all interrupts which are enabled
+ * through aux initialization, as well as the transmitter.
+ */
+static void zynqmp_dp_exit_aux(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS, 0xffffffff);
+}
+
+/*
+ * Generic DP functions
+ */
+
+/**
+ * zynqmp_dp_update_misc - Write the misc registers
+ * @dp: DisplayPort IP core structure
+ *
+ * The misc register values are stored in the structure, and this
+ * function applies the values into the registers.
+ */
+static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+}
+
+/**
+ * zynqmp_dp_set_sync_mode - Set the sync mode bit in the software misc state
+ * @dp: DisplayPort IP core structure
+ * @mode: flag if the sync mode should be on or off
+ *
+ * Set the bit in software misc state. To apply to hardware,
+ * zynqmp_dp_update_misc() should be called.
+ */
+static void zynqmp_dp_set_sync_mode(struct zynqmp_dp *dp, bool mode)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ if (mode)
+ config->misc0 |= ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ else
+ config->misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+}
+
+/**
+ * zynqmp_dp_get_sync_mode - Get the sync mode state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: true if the sync mode is on, or false
+ */
+static bool zynqmp_dp_get_sync_mode(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ return !!(config->misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC);
+}
+
+/**
+ * zynqmp_dp_set_bpc - Set bpc value in software misc state
+ * @dp: DisplayPort IP core structure
+ * @bpc: bits per component
+ *
+ * Return: 0 on success, or the fallback bpc value
+ */
+static u8 zynqmp_dp_set_bpc(struct zynqmp_dp *dp, u8 bpc)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+ u8 ret = 0;
+
+ if (dp->connector.display_info.bpc &&
+ dp->connector.display_info.bpc != bpc) {
+ dev_err(dp->dev, "requested bpc (%u) != display info (%u)\n",
+ bpc, dp->connector.display_info.bpc);
+ bpc = dp->connector.display_info.bpc;
+ }
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_BPC_MASK;
+ switch (bpc) {
+ case 6:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_16;
+ break;
+ default:
+ dev_err(dp->dev, "Not supported bpc (%u). fall back to 8bpc\n",
+ bpc);
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ ret = 8;
+ break;
+ }
+ config->bpc = bpc;
+ zynqmp_dp_update_bpp(dp);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_get_bpc - Get bpc value from software state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: current bpc value
+ */
+static u8 zynqmp_dp_get_bpc(struct zynqmp_dp *dp)
+{
+ return dp->config.bpc;
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit, and calculate all transfer unit size related values.
+ * Calculation is based on DP and IP core specification.
+ */
+static void
+zynqmp_dp_encoder_mode_set_transfer_unit(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u32 tu = ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE;
+ u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
+
+ /* Use the max transfer unit size (default) */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE, tu);
+
+ vid_kbytes = mode->clock * (dp->config.bpp / 8);
+ bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MIN_BYTES_PER_TU,
+ avg_bytes_per_tu / 1000);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FRAC_BYTES_PER_TU,
+ avg_bytes_per_tu % 1000);
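+
+ /*
+ * Example: 1080p60 (148.5 MHz pixel clock) at 24 bpp over 2 lanes of
+ * HBR2 gives avg_bytes_per_tu = 445500 * 64 / 1080 = 26400, i.e. 26
+ * bytes per transfer unit with a fractional part of 400/1000.
+ */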
+
+ /* Configure the initial wait cycle based on transfer unit size */
+ if (tu < (avg_bytes_per_tu / 1000))
+ init_wait = 0;
+ else if ((avg_bytes_per_tu / 1000) <= 4)
+ init_wait = tu;
+ else
+ init_wait = tu - avg_bytes_per_tu / 1000;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_INIT_WAIT, init_wait);
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
+void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode)
+{
+ void __iomem *iomem = dp->iomem;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 reg, wpl;
+ unsigned int rate;
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL, mode->htotal);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL, mode->vtotal);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_POLARITY,
+ (!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
+ ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
+ (!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
+ ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH,
+ mode->hsync_end - mode->hsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH,
+ mode->vsync_end - mode->vsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HRES, mode->hdisplay);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VRES, mode->vdisplay);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSTART,
+ mode->htotal - mode->hsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSTART,
+ mode->vtotal - mode->vsync_start);
+
+ /* In synchronous mode, set the dividers */
+ if (dp->config.misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC) {
+ reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_N_VID, reg);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_M_VID, mode->clock);
+ rate = zynqmp_disp_get_aud_clk_rate(dp->dpsub->disp);
+ if (rate) {
+ dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_N_AUD, reg);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_M_AUD,
+ rate / 1000);
+ }
+ }
+
+ /* Only 2 channel audio is supported now */
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_PIXEL_WIDTH, 1);
+
+ /* Translate to the native 16 bit datapath based on IP core spec */
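+ /*
+ * Example: 1920 active pixels at 24 bpp give wpl = 46095 / 16 = 2880
+ * words; with two lanes, reg = wpl + wpl % 2 - 2 = 2878.
+ */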
+ wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
+ reg = wpl + wpl % lane_cnt - lane_cnt;
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE, reg);
+}
+
+/*
+ * DRM connector functions
+ */
+
+static enum drm_connector_status
+zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct zynqmp_dp_link_config *link_config = &dp->link_config;
+ u32 state, i;
+ int ret;
+
+ /*
+ * This is a heuristic: some monitors take 100 to 500 msec to assert
+ * the HPD signal, so poll for up to a second.
+ */
+ for (i = 0; i < 10; i++) {
+ state = zynqmp_dp_read(dp->iomem,
+ ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD)
+ break;
+ msleep(100);
+ }
+
+ if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD) {
+ dp->status = connector_status_connected;
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read first try fails");
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read retry fails");
+ goto disconnected;
+ }
+ }
+
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ DP_HIGH_BIT_RATE2);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->num_lanes);
+
+ return connector_status_connected;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return connector_status_disconnected;
+}
+
+static int zynqmp_dp_connector_get_modes(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static struct drm_encoder *
+zynqmp_dp_connector_best_encoder(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ return &dp->encoder;
+}
+
+static int zynqmp_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ if (mode->clock > ZYNQMP_MAX_FREQ) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ /* Check with link rate and lane count */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+static void zynqmp_dp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int
+zynqmp_dp_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ int ret;
+
+ if (property == dp->sync_prop) {
+ zynqmp_dp_set_sync_mode(dp, val);
+ } else if (property == dp->bpc_prop) {
+ u8 bpc;
+
+ bpc = zynqmp_dp_set_bpc(dp, val);
+ if (bpc) {
+ drm_object_property_set_value(&connector->base,
+ property, bpc);
+ ret = -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+zynqmp_dp_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ if (property == dp->sync_prop)
+ *val = zynqmp_dp_get_sync_mode(dp);
+ else if (property == dp->bpc_prop)
+ *val = zynqmp_dp_get_bpc(dp);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_connector_funcs zynqmp_dp_connector_funcs = {
+ .detect = zynqmp_dp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = zynqmp_dp_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = zynqmp_dp_connector_atomic_set_property,
+ .atomic_get_property = zynqmp_dp_connector_atomic_get_property,
+};
+
+static struct drm_connector_helper_funcs zynqmp_dp_connector_helper_funcs = {
+ .get_modes = zynqmp_dp_connector_get_modes,
+ .best_encoder = zynqmp_dp_connector_best_encoder,
+ .mode_valid = zynqmp_dp_connector_mode_valid,
+};
+
+/*
+ * DRM encoder functions
+ */
+
+static void zynqmp_dp_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+ unsigned int i;
+ int ret = 0;
+
+ pm_runtime_get_sync(dp->dev);
+ dp->enabled = true;
+ zynqmp_dp_init_aux(dp);
+ zynqmp_dp_update_misc(dp);
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
+ if (dp->status == connector_status_connected) {
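+ /*
+ * The sink may not reply on AUX for a short while after a power
+ * state change, so retry the DPCD power-up write a few times.
+ */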
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ usleep_range(300, 500);
+ }
+ /* Some monitors take time to wake up properly */
+ msleep(zynqmp_dp_power_on_delay_ms);
+ }
+ if (ret != 1)
+ dev_dbg(dp->dev, "DP aux failed\n");
+ else
+ zynqmp_dp_train_loop(dp);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_SW_RESET,
+ ZYNQMP_DP_TX_SW_RESET_ALL);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 1);
+}
+
+static void zynqmp_dp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+
+ dp->enabled = false;
+ cancel_delayed_work(&dp->hpd_work);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
+ ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
+ pm_runtime_put_sync(dp->dev);
+}
+
+static void
+zynqmp_dp_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int rate, max_rate = dp->link_config.max_rate;
+ int ret;
+
+ /* Check again as bpp or format might have been changed */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ dev_err(dp->dev, "the mode, %s,has too high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0)
+ return;
+
+ zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
+}
+
+#define ZYNQMP_DP_MIN_H_BACKPORCH 20
+
+static int
+zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ int diff = mode->htotal - mode->hsync_end;
+
+ /*
+ * The ZynqMP DP requires the horizontal back porch to be at least
+ * ZYNQMP_DP_MIN_H_BACKPORCH pixels, which may not be compatible with
+ * some sink devices. Stretch htotal and recompute the pixel clock so
+ * the effective refresh rate is preserved.
+ */
+ if (diff < ZYNQMP_DP_MIN_H_BACKPORCH) {
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
+ dev_dbg(encoder->dev->dev, "hbackporch adjusted: %d to %d\n",
+ diff, ZYNQMP_DP_MIN_H_BACKPORCH);
+ diff = ZYNQMP_DP_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs zynqmp_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs zynqmp_dp_encoder_helper_funcs = {
+ .enable = zynqmp_dp_encoder_enable,
+ .disable = zynqmp_dp_encoder_disable,
+ .atomic_mode_set = zynqmp_dp_encoder_atomic_mode_set,
+ .atomic_check = zynqmp_dp_encoder_atomic_check,
+};
+
+/*
+ * Component functions
+ */
+
+static void zynqmp_dp_hpd_work_func(struct work_struct *work)
+{
+ struct zynqmp_dp *dp;
+
+ dp = container_of(work, struct zynqmp_dp, hpd_work.work);
+
+ if (dp->drm)
+ drm_helper_hpd_irq_event(dp->drm);
+}
+
+static struct drm_prop_enum_list zynqmp_dp_bpc_enum[] = {
+ { 6, "6BPC" },
+ { 8, "8BPC" },
+ { 10, "10BPC" },
+ { 12, "12BPC" },
+};
+
+int zynqmp_dp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_dp *dp = dpsub->dp;
+ struct drm_encoder *encoder = &dp->encoder;
+ struct drm_connector *connector = &dp->connector;
+ struct drm_device *drm = data;
+ struct device_node *port;
+ int ret;
+
+ if (!dp->num_lanes)
+ return 0;
+
+ encoder->possible_crtcs |= zynqmp_disp_get_crtc_mask(dpsub->disp);
+ for_each_child_of_node(dev->of_node, port) {
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+ encoder->possible_crtcs |= drm_of_find_possible_crtcs(drm,
+ port);
+ }
+ drm_encoder_init(drm, encoder, &zynqmp_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zynqmp_dp_encoder_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(encoder->dev, connector,
+ &zynqmp_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize the drm connector");
+ goto error_encoder;
+ }
+
+ drm_connector_helper_add(connector, &zynqmp_dp_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ dp->drm = drm;
+ dp->sync_prop = drm_property_create_bool(drm, 0, "sync");
+ dp->bpc_prop = drm_property_create_enum(drm, 0, "bpc",
+ zynqmp_dp_bpc_enum,
+ ARRAY_SIZE(zynqmp_dp_bpc_enum));
+
+ dp->config.misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ drm_object_attach_property(&connector->base, dp->sync_prop, false);
+ ret = zynqmp_dp_set_bpc(dp, 8);
+ drm_object_attach_property(&connector->base, dp->bpc_prop,
+ ret ? ret : 8);
+ zynqmp_dp_update_bpp(dp);
+
+ INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
+
+ /* This enables interrupts, so should be called after DRM init */
+ ret = zynqmp_dp_init_aux(dp);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize DP aux");
+ goto error_prop;
+ }
+
+ return 0;
+
+error_prop:
+ drm_property_destroy(dp->drm, dp->bpc_prop);
+ drm_property_destroy(dp->drm, dp->sync_prop);
+ zynqmp_dp_connector_destroy(&dp->connector);
+error_encoder:
+ drm_encoder_cleanup(&dp->encoder);
+ return ret;
+}
+
+void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_dp *dp = dpsub->dp;
+
+ disable_irq(dp->irq);
+ if (!dp->num_lanes)
+ return;
+
+ cancel_delayed_work_sync(&dp->hpd_work);
+ zynqmp_dp_exit_aux(dp);
+ drm_property_destroy(dp->drm, dp->bpc_prop);
+ drm_property_destroy(dp->drm, dp->sync_prop);
+ zynqmp_dp_connector_destroy(&dp->connector);
+ drm_encoder_cleanup(&dp->encoder);
+}
+
+/*
+ * Platform functions
+ */
+
+static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
+ u32 status, mask;
+
+ status = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS);
+ mask = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_MASK);
+ if (!(status & ~mask))
+ return IRQ_NONE;
+
+ /* Log for diagnostics; there is not much the driver can do */
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "underflow interrupt\n");
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS, status);
+
+ /* The DP vblank is not enabled when a remote CRTC device is in use */
+ if (status & ZYNQMP_DP_TX_INTR_VBLANK_START)
+ zynqmp_disp_handle_vblank(dp->dpsub->disp);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_EVENT)
+ schedule_delayed_work(&dp->hpd_work, 0);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_IRQ) {
+ int ret;
+ u8 status[DP_LINK_STATUS_SIZE + 2];
+
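+ /*
+ * Read DPCD registers 0x200 onward: status[2..7] is the standard
+ * link status block and status[4] is LANE_ALIGN_STATUS_UPDATED.
+ */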
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (ret < 0)
+ goto handled;
+
+ if (status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
+ zynqmp_dp_train_loop(dp);
+ }
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
+int zynqmp_dp_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ struct zynqmp_dp *dp;
+ struct resource *res;
+ unsigned int i;
+ int irq, ret;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dpms = DRM_MODE_DPMS_OFF;
+ dp->status = connector_status_disconnected;
+ dp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
+ dp->iomem = devm_ioremap_resource(dp->dev, res);
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
+ ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
+ zynqmp_dp_set(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET, 1);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+
+ dp->num_lanes = 2;
+ for (i = 0; i < ZYNQMP_DP_MAX_LANES; i++) {
+ char phy_name[16];
+
+ snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
+ dp->phy[i] = devm_phy_get(dp->dev, phy_name);
+ if (IS_ERR(dp->phy[i])) {
+ ret = PTR_ERR(dp->phy[i]);
+ dp->phy[i] = NULL;
+
+ /* 2nd lane is optional */
+ if (i == 1 && ret == -ENODEV) {
+ dp->num_lanes = 1;
+ break;
+ }
+
+ /*
+ * If no phy lane is assigned, the DP Tx is disabled. The
+ * display part of the DP subsystem can still be used to
+ * drive the output to the FPGA, so let the DP subsystem
+ * driver proceed without this DP Tx.
+ */
+ if (i == 0 && ret == -ENODEV) {
+ dp->num_lanes = 0;
+ goto out;
+ }
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dp->dev, "failed to get phy lane\n");
+
+ return ret;
+ }
+ }
+
+ ret = zynqmp_dp_init_phy(dp);
+ if (ret)
+ goto error_phy;
+
+ dp->aux.name = "ZynqMP DP AUX";
+ dp->aux.dev = dp->dev;
+ dp->aux.transfer = zynqmp_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ goto error;
+ }
+
+out:
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error;
+ }
+
+ ret = devm_request_threaded_irq(dp->dev, irq, NULL,
+ zynqmp_dp_irq_handler, IRQF_ONESHOT,
+ dev_name(dp->dev), dp);
+ if (ret < 0)
+ goto error;
+ dp->irq = irq;
+
+ dpsub = platform_get_drvdata(pdev);
+ dpsub->dp = dp;
+ dp->dpsub = dpsub;
+
+ dev_dbg(dp->dev,
+ "ZynqMP DisplayPort Tx driver probed with %u phy lanes\n",
+ dp->num_lanes);
+
+ return 0;
+
+error:
+ drm_dp_aux_unregister(&dp->aux);
+error_phy:
+ zynqmp_dp_exit_phy(dp);
+ return ret;
+}
+
+int zynqmp_dp_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ struct zynqmp_dp *dp = dpsub->dp;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ drm_dp_aux_unregister(&dp->aux);
+ zynqmp_dp_exit_phy(dp);
+ dpsub->dp = NULL;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
new file mode 100644
index 000000000000..2f6ce3f3e8cf
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DP_H_
+#define _ZYNQMP_DP_H_
+
+struct zynqmp_dp;
+struct drm_display_mode;
+
+const int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color);
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp);
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
+void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode);
+void __maybe_unused zynqmp_dp_pm_suspend(struct zynqmp_dp *dp);
+void __maybe_unused zynqmp_dp_pm_resume(struct zynqmp_dp *dp);
+int zynqmp_dp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_dp_probe(struct platform_device *pdev);
+int zynqmp_dp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
new file mode 100644
index 000000000000..9b3545348f7b
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DP Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "xlnx_drv.h"
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+static int
+zynqmp_dpsub_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+
+ ret = zynqmp_disp_bind(dev, master, data);
+ if (ret)
+ return ret;
+
+ /* zynqmp_disp should bind first, so zynqmp_dp encoder can find crtc */
+ ret = zynqmp_dp_bind(dev, master, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+zynqmp_dpsub_unbind(struct device *dev, struct device *master, void *data)
+{
+ zynqmp_dp_unbind(dev, master, data);
+ zynqmp_disp_unbind(dev, master, data);
+}
+
+static const struct component_ops zynqmp_dpsub_component_ops = {
+ .bind = zynqmp_dpsub_bind,
+ .unbind = zynqmp_dpsub_unbind,
+};
+
+static int zynqmp_dpsub_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ int ret;
+
+ dpsub = devm_kzalloc(&pdev->dev, sizeof(*dpsub), GFP_KERNEL);
+ if (!dpsub)
+ return -ENOMEM;
+
+ /* Sub-driver will access dpsub from drvdata */
+ platform_set_drvdata(pdev, dpsub);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * DP should be probed first so that the zynqmp_disp can set the output
+ * format accordingly.
+ */
+ ret = zynqmp_dp_probe(pdev);
+ if (ret)
+ goto err_pm;
+
+ ret = zynqmp_disp_probe(pdev);
+ if (ret)
+ goto err_dp;
+
+ ret = component_add(&pdev->dev, &zynqmp_dpsub_component_ops);
+ if (ret)
+ goto err_disp;
+
+ /* Try the reserved memory. Proceed if there's none */
+ of_reserved_mem_device_init(&pdev->dev);
+
+ /* Populate the sound child nodes */
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to populate child nodes\n");
+ goto err_rmem;
+ }
+
+ dpsub->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(dpsub->master)) {
+ ret = PTR_ERR(dpsub->master);
+ dev_err(&pdev->dev, "failed to initialize the drm pipeline\n");
+ goto err_populate;
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed\n");
+
+ return 0;
+
+err_populate:
+ of_platform_depopulate(&pdev->dev);
+err_rmem:
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+err_disp:
+ zynqmp_disp_remove(pdev);
+err_dp:
+ zynqmp_dp_remove(pdev);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int zynqmp_dpsub_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ int err, ret = 0;
+
+ xlnx_drm_pipeline_exit(dpsub->master);
+ of_platform_depopulate(&pdev->dev);
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+
+ err = zynqmp_disp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ err = zynqmp_dp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int __maybe_unused zynqmp_dpsub_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+
+ zynqmp_dp_pm_suspend(dpsub->dp);
+
+ return 0;
+}
+
+static int __maybe_unused zynqmp_dpsub_pm_resume(struct device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+
+ zynqmp_dp_pm_resume(dpsub->dp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_pm_suspend,
+ zynqmp_dpsub_pm_resume)
+};
+
+static const struct of_device_id zynqmp_dpsub_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dpsub-1.7", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
+
+static struct platform_driver zynqmp_dpsub_driver = {
+ .probe = zynqmp_dpsub_probe,
+ .remove = zynqmp_dpsub_remove,
+ .driver = {
+ .name = "zynqmp-display",
+ .of_match_table = zynqmp_dpsub_of_match,
+ .pm = &zynqmp_dpsub_pm_ops,
+ },
+};
+
+module_platform_driver(zynqmp_dpsub_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
new file mode 100644
index 000000000000..6606beffee15
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DPSUB Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DPSUB_H_
+#define _ZYNQMP_DPSUB_H_
+
+struct zynqmp_dpsub {
+ struct zynqmp_dp *dp;
+ struct zynqmp_disp *disp;
+ struct platform_device *master;
+};
+
+#endif /* _ZYNQMP_DPSUB_H_ */
diff --git a/drivers/gpu/drm/zocl/Kconfig b/drivers/gpu/drm/zocl/Kconfig
new file mode 100644
index 000000000000..6a54d01cccd1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZOCL
+ tristate "Xilinx Zynq OpenCL"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Select this option to enable zocl, the GEM-style CMA-backed DRM
+ memory manager used by Xilinx Zynq OpenCL accelerators.
diff --git a/drivers/gpu/drm/zocl/Makefile b/drivers/gpu/drm/zocl/Makefile
new file mode 100644
index 000000000000..da58e5084f9d
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+zocl-y := zocl_drv.o zocl_bo.o
+
+obj-$(CONFIG_DRM_ZOCL) += zocl.o
diff --git a/drivers/gpu/drm/zocl/zocl_bo.c b/drivers/gpu/drm/zocl/zocl_bo.c
new file mode 100644
index 000000000000..123a37842ad4
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_bo.c
@@ -0,0 +1,271 @@
+/*
+ * A GEM-style, CMA-backed memory manager for Zynq-based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+void zocl_describe(const struct drm_zocl_bo *obj)
+{
+ size_t size_in_kb = obj->base.base.size / 1024;
+ size_t physical_addr = obj->base.paddr;
+
+ DRM_INFO("%p: H[0x%zxKB] D[0x%zx]\n",
+ obj,
+ size_in_kb,
+ physical_addr);
+}
+
+static struct drm_zocl_bo *zocl_create_bo(struct drm_device *dev,
+ uint64_t unaligned_size)
+{
+ size_t size = PAGE_ALIGN(unaligned_size);
+ struct drm_gem_cma_object *cma_obj;
+
+ DRM_DEBUG("%s:%s:%d: %zd\n", __FILE__, __func__, __LINE__, size);
+
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ cma_obj = drm_gem_cma_create(dev, size);
+ if (IS_ERR(cma_obj))
+ return ERR_PTR(-ENOMEM);
+
+ return to_zocl_bo(&cma_obj->base);
+}
+
+int zocl_create_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ int ret;
+ struct drm_zocl_create_bo *args = data;
+ struct drm_zocl_bo *bo;
+
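+ /* Only BOs that are both coherent and CMA-backed are supported */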
+ if (((args->flags & DRM_ZOCL_BO_FLAGS_COHERENT) == 0) ||
+ ((args->flags & DRM_ZOCL_BO_FLAGS_CMA) == 0))
+ return -EINVAL;
+
+ bo = zocl_create_bo(dev, args->size);
+ if (IS_ERR(bo)) {
+ DRM_DEBUG("object creation failed\n");
+ return PTR_ERR(bo);
+ }
+
+ bo->flags |= DRM_ZOCL_BO_FLAGS_COHERENT;
+ bo->flags |= DRM_ZOCL_BO_FLAGS_CMA;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, bo);
+ ret = drm_gem_handle_create(filp, &bo->base.base, &args->handle);
+ if (ret) {
+ drm_gem_cma_free_object(&bo->base.base);
+ DRM_DEBUG("handle creation failed\n");
+ return ret;
+ }
+
+ zocl_describe(bo);
+ drm_gem_object_put_unlocked(&bo->base.base);
+
+ return ret;
+}
+
+int zocl_map_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ struct drm_zocl_map_bo *args = data;
+ struct drm_gem_object *gem_obj;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+ gem_obj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ /* The mmap offset was set up at BO allocation time. */
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ zocl_describe(to_zocl_bo(gem_obj));
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return 0;
+}
+
+int zocl_sync_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_sync_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+ void *kaddr;
+ int ret = 0;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ if ((args->offset > gem_obj->size) || (args->size > gem_obj->size) ||
+ ((args->offset + args->size) > gem_obj->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ kaddr = drm_gem_cma_prime_vmap(gem_obj);
+
+ /* only invalidate the range of addresses requested by the user */
+ kaddr += args->offset;
+
+ if (args->dir == DRM_ZOCL_SYNC_BO_TO_DEVICE)
+ flush_kernel_vmap_range(kaddr, args->size);
+ else if (args->dir == DRM_ZOCL_SYNC_BO_FROM_DEVICE)
+ invalidate_kernel_vmap_range(kaddr, args->size);
+ else
+ ret = -EINVAL;
+
+out:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+int zocl_info_bo_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_bo *bo;
+ struct drm_zocl_info_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ bo = to_zocl_bo(gem_obj);
+
+ args->size = bo->base.base.size;
+ args->paddr = bo->base.paddr;
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return 0;
+}
+
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_pwrite_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+ char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret = 0;
+ void *kaddr;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+ || ((args->offset + args->size) > gem_obj->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (args->size == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!access_ok(user_data, args->size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ kaddr = drm_gem_cma_prime_vmap(gem_obj);
+ kaddr += args->offset;
+
+ if (copy_from_user(kaddr, user_data, args->size))
+ ret = -EFAULT;
+out:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ const struct drm_zocl_pread_bo *args = data;
+ struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+ args->handle);
+ char __user *user_data = to_user_ptr(args->data_ptr);
+ int ret = 0;
+ void *kaddr;
+
+ DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ return -EINVAL;
+ }
+
+ if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+ || ((args->offset + args->size) > gem_obj->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (args->size == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!access_ok(user_data, args->size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ kaddr = drm_gem_cma_prime_vmap(gem_obj);
+ kaddr += args->offset;
+
+ if (copy_to_user(user_data, kaddr, args->size))
+ ret = -EFAULT;
+
+out:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/zocl/zocl_drv.c b/drivers/gpu/drm/zocl/zocl_drv.c
new file mode 100644
index 000000000000..a97082ecc54e
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.c
@@ -0,0 +1,217 @@
+/*
+ * A GEM-style, CMA-backed memory manager for Zynq-based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+#define ZOCL_DRIVER_NAME "zocl"
+#define ZOCL_DRIVER_DESC "Zynq BO manager"
+#define ZOCL_DRIVER_DATE "20161024"
+#define ZOCL_DRIVER_MAJOR 2016
+#define ZOCL_DRIVER_MINOR 3
+#define ZOCL_DRIVER_PATCHLEVEL 1
+#define ZOCL_FILE_PAGE_OFFSET 0x00100000
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+static const struct vm_operations_struct reg_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+static int zocl_drm_load(struct drm_device *drm, unsigned long flags)
+{
+ struct platform_device *pdev;
+ struct resource *res;
+ struct drm_zocl_dev *zdev;
+ void __iomem *map;
+
+ pdev = to_platform_device(drm->dev);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ map = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(map)) {
+ DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ zdev = devm_kzalloc(drm->dev, sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
+
+ zdev->ddev = drm;
+ drm->dev_private = zdev;
+ zdev->regs = map;
+ zdev->res_start = res->start;
+ zdev->res_len = resource_size(res);
+ platform_set_drvdata(pdev, zdev);
+
+ return 0;
+}
+
+static int zocl_drm_unload(struct drm_device *drm)
+{
+ return 0;
+}
+
+static void zocl_free_object(struct drm_gem_object *obj)
+{
+ struct drm_zocl_bo *zocl_obj = to_zocl_bo(obj);
+
+ DRM_INFO("Freeing BO\n");
+ zocl_describe(zocl_obj);
+ drm_gem_cma_free_object(obj);
+}
+
+static int zocl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_zocl_dev *zdev = dev->dev_private;
+ unsigned long vsize;
+ int rc;
+
+ /*
+ * If the page offset is greater than 4G, let GEM handle it and do
+ * what it thinks is best; we only handle page offsets below 4G,
+ * which map the device registers.
+ */
+ if (likely(vma->vm_pgoff >= ZOCL_FILE_PAGE_OFFSET))
+ return drm_gem_cma_mmap(filp, vma);
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize > zdev->res_len)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+
+ vma->vm_ops = &reg_physical_vm_ops;
+ rc = io_remap_pfn_range(vma, vma->vm_start,
+ zdev->res_start >> PAGE_SHIFT,
+ vsize, vma->vm_page_prot);
+
+ return rc;
+}
+
+static const struct drm_ioctl_desc zocl_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ZOCL_CREATE_BO, zocl_create_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_MAP_BO, zocl_map_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_SYNC_BO, zocl_sync_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_INFO_BO, zocl_info_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PWRITE_BO, zocl_pwrite_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PREAD_BO, zocl_pread_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations zocl_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = zocl_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver zocl_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
+ .load = zocl_drm_load,
+ .unload = zocl_drm_unload,
+ .gem_free_object = zocl_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .ioctls = zocl_ioctls,
+ .num_ioctls = ARRAY_SIZE(zocl_ioctls),
+ .fops = &zocl_driver_fops,
+ .name = ZOCL_DRIVER_NAME,
+ .desc = ZOCL_DRIVER_DESC,
+ .date = ZOCL_DRIVER_DATE,
+ .major = ZOCL_DRIVER_MAJOR,
+ .minor = ZOCL_DRIVER_MINOR,
+ .patchlevel = ZOCL_DRIVER_PATCHLEVEL,
+};
+
+/* init xilinx opencl drm platform */
+static int zocl_drm_platform_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&zocl_driver, pdev);
+}
+
+/* exit xilinx opencl drm platform */
+static int zocl_drm_platform_remove(struct platform_device *pdev)
+{
+ struct drm_zocl_dev *zdev = platform_get_drvdata(pdev);
+
+ if (zdev->ddev) {
+ drm_dev_unregister(zdev->ddev);
+ drm_dev_put(zdev->ddev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id zocl_drm_of_match[] = {
+ { .compatible = "xlnx,zocl", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zocl_drm_of_match);
+
+static struct platform_driver zocl_drm_private_driver = {
+ .probe = zocl_drm_platform_probe,
+ .remove = zocl_drm_platform_remove,
+ .driver = {
+ .name = "zocl-drm",
+ .of_match_table = zocl_drm_of_match,
+ },
+};
+
+module_platform_driver(zocl_drm_private_driver);
+
+MODULE_VERSION(__stringify(ZOCL_DRIVER_MAJOR) "."
+ __stringify(ZOCL_DRIVER_MINOR) "."
+ __stringify(ZOCL_DRIVER_PATCHLEVEL));
+
+MODULE_DESCRIPTION(ZOCL_DRIVER_DESC);
+MODULE_AUTHOR("Sonal Santan <sonal.santan@xilinx.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/zocl/zocl_drv.h b/drivers/gpu/drm/zocl/zocl_drv.h
new file mode 100644
index 000000000000..ef6a9acadfc1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.h
@@ -0,0 +1,59 @@
+/*
+ * A GEM-style, CMA-backed memory manager for Zynq-based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZOCL_DRV_H_
+#define _ZOCL_DRV_H_
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+struct drm_zocl_bo {
+ struct drm_gem_cma_object base;
+ uint32_t flags;
+};
+
+struct drm_zocl_dev {
+ struct drm_device *ddev;
+ void __iomem *regs;
+ phys_addr_t res_start;
+ resource_size_t res_len;
+ unsigned int irq;
+};
+
+static inline struct drm_zocl_bo *to_zocl_bo(struct drm_gem_object *bo)
+{
+ return (struct drm_zocl_bo *) bo;
+}
+
+int zocl_create_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_sync_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_map_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_info_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+void zocl_describe(const struct drm_zocl_bo *obj);
+
+#endif
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index c0bc43d01018..e32b98e11869 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -229,10 +229,17 @@ static const struct i2c_device_id pmbus_id[] = {
MODULE_DEVICE_TABLE(i2c, pmbus_id);
+static const struct of_device_id pmbus_of_match[] = {
+ {.compatible = "ti,tps544b25"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, pmbus_of_match);
+
/* This is the driver that will be inserted */
static struct i2c_driver pmbus_driver = {
.driver = {
.name = "pmbus",
+ .of_match_table = of_match_ptr(pmbus_of_match),
},
.probe = pmbus_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index a51d3b795770..b3799aec3680 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -13,7 +13,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
@@ -22,11 +24,14 @@
#define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */
#define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */
#define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */
+#define CDNS_I2C_SLV_PAUSE_OFFSET 0x18 /* Slave monitor pause Register, RW */
#define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */
+#define CDNS_I2C_IMR_OFFSET 0x20 /* IRQ Mask Register, RO */
#define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */
#define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */
/* Control Register Bit mask definitions */
+#define CDNS_I2C_CR_SLVMON BIT(5) /* Slave monitor mode bit */
#define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */
#define CDNS_I2C_CR_ACK_EN BIT(3)
#define CDNS_I2C_CR_NEA BIT(2)
@@ -40,9 +45,16 @@
#define CDNS_I2C_CR_DIVB_SHIFT 8
#define CDNS_I2C_CR_DIVB_MASK (0x3f << CDNS_I2C_CR_DIVB_SHIFT)
+#define CDNS_I2C_CR_SLAVE_EN_MASK (CDNS_I2C_CR_CLR_FIFO | \
+ CDNS_I2C_CR_NEA | \
+ CDNS_I2C_CR_ACK_EN | \
+ CDNS_I2C_CR_MS)
+
/* Status Register Bit mask definitions */
#define CDNS_I2C_SR_BA BIT(8)
+#define CDNS_I2C_SR_TXDV BIT(6)
#define CDNS_I2C_SR_RXDV BIT(5)
+#define CDNS_I2C_SR_RXRW BIT(3)
/*
* I2C Address Register Bit mask definitions
@@ -91,6 +103,14 @@
CDNS_I2C_IXR_DATA | \
CDNS_I2C_IXR_COMP)
+#define CDNS_I2C_IXR_SLAVE_INTR_MASK (CDNS_I2C_IXR_RX_UNF | \
+ CDNS_I2C_IXR_TX_OVF | \
+ CDNS_I2C_IXR_RX_OVF | \
+ CDNS_I2C_IXR_TO | \
+ CDNS_I2C_IXR_NACK | \
+ CDNS_I2C_IXR_DATA | \
+ CDNS_I2C_IXR_COMP)
+
#define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000)
/* timeout for pm runtime autosuspend */
#define CNDS_I2C_PM_TIMEOUT 1000 /* ms */
@@ -117,6 +137,32 @@
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+/**
+ * enum cdns_i2c_mode - I2C Controller current operating mode
+ *
+ * @CDNS_I2C_MODE_SLAVE: I2C controller operating in slave mode
+ * @CDNS_I2C_MODE_MASTER: I2C Controller operating in master mode
+ */
+enum cdns_i2c_mode {
+ CDNS_I2C_MODE_SLAVE,
+ CDNS_I2C_MODE_MASTER,
+};
+
+/**
+ * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode
+ *
+ * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
+ * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
+ * @CDNS_I2C_SLAVE_STATE_RECV: I2C slave receiving data from master
+ */
+enum cdns_i2c_slave_state {
+ CDNS_I2C_SLAVE_STATE_IDLE,
+ CDNS_I2C_SLAVE_STATE_SEND,
+ CDNS_I2C_SLAVE_STATE_RECV,
+};
+#endif
+
/**
* struct cdns_i2c - I2C device private data structure
*
@@ -138,6 +184,14 @@
* @clk: Pointer to struct clk
* @clk_rate_change_nb: Notifier block for clock rate changes
* @quirks: flag for broken hold bit usage in r1p10
+ * @ctrl_reg: Cached value of the control register.
+ * @rinfo: Structure holding recovery information.
+ * @pinctrl: Pin control state holder.
+ * @pinctrl_pins_default: Default pin control state.
+ * @pinctrl_pins_gpio: GPIO pin control state.
+ * @slave: Registered slave instance.
+ * @dev_mode: I2C operating role (master/slave).
+ * @slave_state: I2C slave state (idle/read/write).
*/
struct cdns_i2c {
struct device *dev;
@@ -158,6 +212,16 @@ struct cdns_i2c {
struct clk *clk;
struct notifier_block clk_rate_change_nb;
u32 quirks;
+ u32 ctrl_reg;
+ struct i2c_bus_recovery_info rinfo;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_pins_default;
+ struct pinctrl_state *pinctrl_pins_gpio;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ struct i2c_client *slave;
+ enum cdns_i2c_mode dev_mode;
+ enum cdns_i2c_slave_state slave_state;
+#endif
};
struct cdns_platform_data {
@@ -186,17 +250,148 @@ static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround)
(id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1));
}
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static void cdns_i2c_set_mode(enum cdns_i2c_mode mode, struct cdns_i2c *id)
+{
+ u32 reg;
+
+ /* Disable all interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
+
+ /* Clear FIFO and transfer size */
+ cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET);
+
+ /* Update device mode and state */
+ id->dev_mode = mode;
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+
+ switch (mode) {
+ case CDNS_I2C_MODE_MASTER:
+ /* Enable i2c master */
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ break;
+ case CDNS_I2C_MODE_SLAVE:
+ /* Enable i2c slave */
+ reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ reg &= ~CDNS_I2C_CR_SLAVE_EN_MASK;
+ cdns_i2c_writereg(reg, CDNS_I2C_CR_OFFSET);
+
+ /* Setting slave address */
+ cdns_i2c_writereg(id->slave->addr & CDNS_I2C_ADDR_MASK,
+ CDNS_I2C_ADDR_OFFSET);
+
+ /* Enable slave send/receive interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLAVE_INTR_MASK,
+ CDNS_I2C_IER_OFFSET);
+ break;
+ }
+}
+
+static void cdns_i2c_slave_rcv_data(struct cdns_i2c *id)
+{
+ u8 bytes;
+ unsigned char data;
+
+ /* Prepare backend for data reception */
+ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_RECV;
+ i2c_slave_event(id->slave, I2C_SLAVE_WRITE_REQUESTED, NULL);
+ }
+
+ /* Fetch number of bytes to receive */
+ bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+
+ /* Read data and send to backend */
+ while (bytes--) {
+ data = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ i2c_slave_event(id->slave, I2C_SLAVE_WRITE_RECEIVED, &data);
+ }
+}
+
+static void cdns_i2c_slave_send_data(struct cdns_i2c *id)
+{
+ u8 data;
+
+ /* Prepare backend for data transmission */
+ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_SEND;
+ i2c_slave_event(id->slave, I2C_SLAVE_READ_REQUESTED, &data);
+ } else {
+ i2c_slave_event(id->slave, I2C_SLAVE_READ_PROCESSED, &data);
+ }
+
+ /* Send data over bus */
+ cdns_i2c_writereg(data, CDNS_I2C_DATA_OFFSET);
+}
+
/**
- * cdns_i2c_isr - Interrupt handler for the I2C device
- * @irq: irq number for the I2C device
- * @ptr: void pointer to cdns_i2c structure
+ * cdns_i2c_slave_isr - Interrupt handler for the I2C device in slave role
+ * @ptr: Pointer to I2C device private data
+ *
+ * This function handles the data interrupt and transfer complete interrupt of
+ * the I2C device in slave role.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_slave_isr(void *ptr)
+{
+ struct cdns_i2c *id = ptr;
+ unsigned int isr_status, i2c_status;
+
+ /* Fetch the interrupt status */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /* Ignore masked interrupts */
+ isr_status &= ~cdns_i2c_readreg(CDNS_I2C_IMR_OFFSET);
+
+ /* Fetch transfer mode (send/receive) */
+ i2c_status = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET);
+
+ /* Handle data send/receive */
+ if (i2c_status & CDNS_I2C_SR_RXRW) {
+ /* Send data to master */
+ if (isr_status & CDNS_I2C_IXR_DATA)
+ cdns_i2c_slave_send_data(id);
+
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ }
+ } else {
+ /* Receive data from master */
+ if (isr_status & CDNS_I2C_IXR_DATA)
+ cdns_i2c_slave_rcv_data(id);
+
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ cdns_i2c_slave_rcv_data(id);
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ }
+ }
+
+ /* Master indicated xfer stop or fifo underflow/overflow */
+ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_RX_OVF |
+ CDNS_I2C_IXR_RX_UNF | CDNS_I2C_IXR_TX_OVF)) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * cdns_i2c_master_isr - Interrupt handler for the I2C device in master role
+ * @ptr: Pointer to I2C device private data
*
* This function handles the data interrupt, transfer complete interrupt and
- * the error interrupts of the I2C device.
+ * the error interrupts of the I2C device in master role.
*
* Return: IRQ_HANDLED always
*/
-static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
unsigned int isr_status, avail_bytes, updatetx;
unsigned int bytes_to_send;
@@ -341,6 +536,23 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
status = IRQ_HANDLED;
}
+ /* Handle the slave monitor mode interrupt */
+ if (isr_status & CDNS_I2C_IXR_SLV_RDY) {
+ unsigned int ctrl_reg;
+ /* Read control register */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+
+ /* Disable slave monitor mode */
+ ctrl_reg &= ~CDNS_I2C_CR_SLVMON;
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Clear interrupt flag for slvmon mode */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLV_RDY, CDNS_I2C_IDR_OFFSET);
+
+ done_flag = 1;
+ status = IRQ_HANDLED;
+ }
+
/* Update the status for errors */
id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
if (id->err_status)
@@ -353,6 +565,40 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
}
/**
+ * cdns_i2c_isr - Interrupt handler for the I2C device
+ * @irq: irq number for the I2C device
+ * @ptr: void pointer to cdns_i2c structure
+ *
+ * This function passes the control to slave/master based on current role of
+ * i2c controller.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+{
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ struct cdns_i2c *id = ptr;
+
+ switch (id->dev_mode) {
+ case CDNS_I2C_MODE_SLAVE:
+ dev_dbg(&id->adap.dev, "slave interrupt\n");
+ cdns_i2c_slave_isr(ptr);
+ break;
+ case CDNS_I2C_MODE_MASTER:
+ dev_dbg(&id->adap.dev, "master interrupt\n");
+ cdns_i2c_master_isr(ptr);
+ break;
+ default:
+ dev_dbg(&id->adap.dev, "undefined interrupt\n");
+ break;
+ }
+#else
+ cdns_i2c_master_isr(ptr);
+#endif
+ return IRQ_HANDLED;
+}
+
+/**
* cdns_i2c_mrecv - Prepare and start a master receive operation
* @id: pointer to the i2c device structure
*/
@@ -360,6 +606,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
{
unsigned int ctrl_reg;
unsigned int isr_status;
+ unsigned long flags;
id->p_recv_buf = id->p_msg->buf;
id->recv_count = id->p_msg->len;
@@ -377,8 +624,12 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) {
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ } else {
+ if (id->p_msg->flags & I2C_M_NOSTART)
+ ctrl_reg &= ~CDNS_I2C_CR_HOLD;
+ }
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -401,6 +652,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
}
/* Set the slave address in address register - triggers operation */
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
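+ /*
+ * Keep interrupts off from the address write (which triggers the
+ * transfer) until the bus hold flag is settled below, so the
+ * completion interrupt cannot observe a half-programmed state.
+ */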
+ local_irq_save(flags);
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
/* Clear the bus hold flag if bytes to receive is less than FIFO size */
@@ -408,7 +661,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
(id->recv_count <= CDNS_I2C_FIFO_DEPTH))
cdns_i2c_clear_bus_hold(id);
- cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ local_irq_restore(flags);
}
/**
@@ -468,10 +721,43 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
if (!id->bus_hold_flag && !id->send_count)
cdns_i2c_clear_bus_hold(id);
/* Set the slave address in address register - triggers operation. */
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
+}
- cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+/**
+ * cdns_i2c_slvmon - Handle the slave monitor mode feature
+ * @id: pointer to the i2c device
+ */
+static void cdns_i2c_slvmon(struct cdns_i2c *id)
+{
+ unsigned int ctrl_reg;
+ unsigned int isr_status;
+
+ id->p_recv_buf = NULL;
+ id->p_send_buf = id->p_msg->buf;
+ id->send_count = id->p_msg->len;
+
+ /* Clear the interrupts in interrupt status register. */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /* Enable slvmon control reg */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg |= CDNS_I2C_CR_MS | CDNS_I2C_CR_NEA | CDNS_I2C_CR_SLVMON
+ | CDNS_I2C_CR_CLR_FIFO;
+ ctrl_reg &= ~(CDNS_I2C_CR_RW);
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Initialize slvmon reg */
+ cdns_i2c_writereg(0xF, CDNS_I2C_SLV_PAUSE_OFFSET);
+
+ /* Set the slave address to start the slave address transmission */
+ cdns_i2c_writereg(id->p_msg->addr, CDNS_I2C_ADDR_OFFSET);
+
+ /* Setup slvmon interrupt flag */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLV_RDY, CDNS_I2C_IER_OFFSET);
}
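+
+/*
+ * Illustrative use of the slave monitor path (not part of this patch; the
+ * target address 0x50 and the adapter handle are assumptions): a client
+ * submits a zero-length message, which cdns_i2c_process_msg() routes to
+ * cdns_i2c_slvmon() so only the address ACK is checked.
+ *
+ * struct i2c_msg probe = {
+ * .addr = 0x50, (assumed target)
+ * .flags = 0,
+ * .len = 0, (zero length selects slave monitor mode)
+ * .buf = NULL,
+ * };
+ * ret = i2c_transfer(adapter, &probe, 1); (returns 1 when the address ACKed)
+ */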
/**
@@ -490,12 +776,12 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
/* Clear the hold bit and fifos */
regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
- regval &= ~CDNS_I2C_CR_HOLD;
+ regval &= ~(CDNS_I2C_CR_HOLD | CDNS_I2C_CR_SLVMON);
regval |= CDNS_I2C_CR_CLR_FIFO;
cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
/* Update the transfercount register to zero */
cdns_i2c_writereg(0, CDNS_I2C_XFER_SIZE_OFFSET);
- /* Clear the interupt status register */
+ /* Clear the interrupt status register */
regval = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
cdns_i2c_writereg(regval, CDNS_I2C_ISR_OFFSET);
/* Clear the status register */
@@ -524,9 +810,11 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA,
CDNS_I2C_CR_OFFSET);
}
-
- /* Check for the R/W flag on each msg */
- if (msg->flags & I2C_M_RD)
+ /* Check for zero length - Slave monitor mode */
+ if (msg->len == 0)
+ cdns_i2c_slvmon(id);
+ /* Check for the R/W flag on each msg */
+ else if (msg->flags & I2C_M_RD)
cdns_i2c_mrecv(id);
else
cdns_i2c_msend(id);
@@ -534,6 +822,7 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
/* Wait for the signal of completion */
time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
if (time_left == 0) {
+ i2c_recover_bus(adap);
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
"timeout waiting on completion\n");
@@ -567,15 +856,34 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
u32 reg;
struct cdns_i2c *id = adap->algo_data;
bool hold_quirk;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ bool change_role = false;
+#endif
ret = pm_runtime_get_sync(id->dev);
if (ret < 0)
return ret;
- /* Check if the bus is free */
- if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
- ret = -EAGAIN;
- goto out;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Check i2c operating mode and switch if possible */
+ if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
+ if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE)
+ return -EAGAIN;
+
+ /* Set mode to master */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+
+ /* Mark flag to change role once xfer is completed */
+ change_role = true;
}
+#endif
+
+ /* Check if the bus is free; skipped for zero-length slave monitor xfers */
+ if (msgs->len &&
+ (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA)) {
+ ret = -EAGAIN;
+ goto out;
+ }
hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT);
/*
@@ -629,6 +937,13 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
ret = num;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Switch i2c mode to slave */
+ if (change_role)
+ cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
+#endif
+
out:
pm_runtime_mark_last_busy(id->dev);
pm_runtime_put_autosuspend(id->dev);
@@ -643,14 +958,67 @@ out:
*/
static u32 cdns_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
- (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ func |= I2C_FUNC_SLAVE;
+#endif
+
+ return func;
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static int cdns_reg_slave(struct i2c_client *slave)
+{
+ int ret;
+ struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c,
+ adap);
+
+ if (id->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ ret = pm_runtime_get_sync(id->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Store slave information */
+ id->slave = slave;
+
+ /* Enable I2C slave */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
+
+ return 0;
+}
+
+static int cdns_unreg_slave(struct i2c_client *slave)
+{
+ struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c,
+ adap);
+
+ pm_runtime_put(id->dev);
+
+ /* Remove slave information */
+ id->slave = NULL;
+
+ /* Enable I2C master */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+
+ return 0;
}
+#endif
static const struct i2c_algorithm cdns_i2c_algo = {
.master_xfer = cdns_i2c_master_xfer,
.functionality = cdns_i2c_func,
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ .reg_slave = cdns_reg_slave,
+ .unreg_slave = cdns_unreg_slave,
+#endif
};
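+
+/*
+ * Sketch of a backend the .reg_slave hook above serves; the callback
+ * signature comes from include/linux/i2c.h, the body is illustrative:
+ *
+ * static int my_slave_cb(struct i2c_client *client,
+ * enum i2c_slave_event event, u8 *val)
+ * {
+ * switch (event) {
+ * case I2C_SLAVE_WRITE_RECEIVED:
+ * (consume *val written by the remote master)
+ * break;
+ * case I2C_SLAVE_READ_REQUESTED:
+ * case I2C_SLAVE_READ_PROCESSED:
+ * *val = 0xff; (supply the next byte to send)
+ * break;
+ * default:
+ * break;
+ * }
+ * return 0;
+ * }
+ *
+ * A backend registered with i2c_slave_register(client, my_slave_cb) then
+ * receives these events from cdns_i2c_slave_isr().
+ */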
/**
@@ -740,12 +1108,11 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
if (ret)
return ret;
- ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg = id->ctrl_reg;
ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK);
ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) |
(div_b << CDNS_I2C_CR_DIVB_SHIFT));
- cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
-
+ id->ctrl_reg = ctrl_reg;
return 0;
}
@@ -829,6 +1196,26 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
}
/**
+ * cdns_i2c_init - Controller initialisation
+ * @id: Device private data structure
+ *
+ * Initialise the i2c controller.
+ *
+ */
+static void cdns_i2c_init(struct cdns_i2c *id)
+{
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ /*
+ * Cadence I2C controller has a bug wherein it generates
+ * invalid read transaction after HW timeout in master receiver mode.
+ * HW timeout is not used by this driver and the interrupt is disabled.
+ * But the feature itself cannot be disabled. Hence maximum value
+ * is written to this register to reduce the chances of error.
+ */
+ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+}
+
+/**
* cdns_i2c_runtime_resume - Runtime resume
* @dev: Address of the platform_device structure
*
@@ -846,6 +1233,89 @@ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
dev_err(dev, "Cannot enable clock.\n");
return ret;
}
+ cdns_i2c_init(xi2c);
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_prepare_recovery - Withhold recovery state
+ * @adapter: Pointer to i2c adapter
+ *
+ * This function is called to prepare for recovery.
+ * It changes the state of pins from SCL/SDA to GPIO.
+ */
+static void cdns_i2c_prepare_recovery(struct i2c_adapter *adapter)
+{
+ struct cdns_i2c *p_cdns_i2c;
+
+ p_cdns_i2c = container_of(adapter, struct cdns_i2c, adap);
+
+ /* Setting pin state as gpio */
+ pinctrl_select_state(p_cdns_i2c->pinctrl,
+ p_cdns_i2c->pinctrl_pins_gpio);
+}
+
+/**
+ * cdns_i2c_unprepare_recovery - Release recovery state
+ * @adapter: Pointer to i2c adapter
+ *
+ * This function is called on exiting recovery. It reverts
+ * the state of pins from GPIO to SCL/SDA.
+ */
+static void cdns_i2c_unprepare_recovery(struct i2c_adapter *adapter)
+{
+ struct cdns_i2c *p_cdns_i2c;
+
+ p_cdns_i2c = container_of(adapter, struct cdns_i2c, adap);
+
+ /* Setting pin state to default(i2c) */
+ pinctrl_select_state(p_cdns_i2c->pinctrl,
+ p_cdns_i2c->pinctrl_pins_default);
+}
+
+/**
+ * cdns_i2c_init_recovery_info - Initialize I2C bus recovery
+ * @pid: Pointer to cdns i2c structure
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does required initialization for i2c bus
+ * recovery. It registers three functions for prepare,
+ * recover and unprepare
+ *
+ * Return: 0 on Success, negative error otherwise.
+ */
+static int cdns_i2c_init_recovery_info(struct cdns_i2c *pid,
+ struct platform_device *pdev)
+{
+ struct i2c_bus_recovery_info *rinfo = &pid->rinfo;
+
+ pid->pinctrl_pins_default = pinctrl_lookup_state(pid->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ pid->pinctrl_pins_gpio = pinctrl_lookup_state(pid->pinctrl, "gpio");
+
+ /* Fetch recovery GPIOs; gpiolib appends "-gpios" to the con_id */
+ rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", 0);
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", 0);
+
+ /* If the GPIO driver isn't ready yet, defer probe */
+ if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
+ PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* Validate fetched information */
+ if (IS_ERR(rinfo->sda_gpiod) ||
+ IS_ERR(rinfo->scl_gpiod) ||
+ IS_ERR(pid->pinctrl_pins_default) ||
+ IS_ERR(pid->pinctrl_pins_gpio)) {
+ dev_dbg(&pdev->dev, "recovery information incomplete\n");
+ return 0;
+ }
+
+ rinfo->prepare_recovery = cdns_i2c_prepare_recovery;
+ rinfo->unprepare_recovery = cdns_i2c_unprepare_recovery;
+ rinfo->recover_bus = i2c_generic_scl_recovery;
+ pid->adap.bus_recovery_info = rinfo;
return 0;
}
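+
+/*
+ * Illustrative devicetree fragment this helper expects (property and state
+ * names follow the lookups above; the GPIO numbers and pinctrl labels are
+ * assumptions):
+ *
+ * &i2c0 {
+ * pinctrl-names = "default", "gpio";
+ * pinctrl-0 = <&pinctrl_i2c0_default>;
+ * pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ * sda-gpios = <&gpio 75 GPIO_ACTIVE_HIGH>;
+ * scl-gpios = <&gpio 74 GPIO_ACTIVE_HIGH>;
+ * };
+ */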
@@ -896,6 +1366,13 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->quirks = data->quirks;
}
+ id->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(id->pinctrl)) {
+ ret = cdns_i2c_init_recovery_info(id, pdev);
+ if (ret)
+ return ret;
+ }
+
r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
if (IS_ERR(id->membase))
@@ -923,10 +1400,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (ret)
dev_err(&pdev->dev, "Unable to enable clock.\n");
- pm_runtime_enable(id->dev);
pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(id->dev);
pm_runtime_set_active(id->dev);
+ pm_runtime_enable(id->dev);
id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb;
if (clk_notifier_register(id->clk, &id->clk_rate_change_nb))
@@ -938,8 +1415,12 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX))
id->i2c_clk = CDNS_I2C_SPEED_DEFAULT;
- cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
- CDNS_I2C_CR_OFFSET);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Set initial mode to master */
+ id->dev_mode = CDNS_I2C_MODE_MASTER;
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+#endif
+ id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS;
ret = cdns_i2c_setclk(id->input_clk, id);
if (ret) {
@@ -955,14 +1436,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
- /*
- * Cadence I2C controller has a bug wherein it generates
- * invalid read transaction after HW timeout in master receiver mode.
- * HW timeout is not used by this driver and the interrupt is disabled.
- * But the feature itself cannot be disabled. Hence maximum value
- * is written to this register to reduce the chances of error.
- */
- cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ cdns_i2c_init(id);
ret = i2c_add_adapter(&id->adap);
if (ret < 0)
@@ -975,8 +1449,8 @@ static int cdns_i2c_probe(struct platform_device *pdev)
err_clk_dis:
clk_disable_unprepare(id->clk);
- pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return ret;
}
@@ -992,10 +1466,13 @@ static int cdns_i2c_remove(struct platform_device *pdev)
{
struct cdns_i2c *id = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
i2c_del_adapter(&id->adap);
clk_notifier_unregister(id->clk, &id->clk_rate_change_nb);
clk_disable_unprepare(id->clk);
- pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 37b3b9307d07..30e42bb96df5 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -46,6 +46,7 @@ enum xiic_endian {
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
+ * @dev: Pointer to device structure
* @base: Memory base of the HW registers
* @wait: Wait queue for callers
* @adap: Kernel adapter representation
@@ -57,6 +58,7 @@ enum xiic_endian {
* @rx_msg: Current RX message
* @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
+ * @clk: Pointer to struct clk
*/
struct xiic_i2c {
struct device *dev;
@@ -154,6 +156,8 @@ struct xiic_i2c {
#define XIIC_RESET_MASK 0xAUL
#define XIIC_PM_TIMEOUT 1000 /* ms */
+/* timeout waiting for the controller to respond */
+#define XIIC_I2C_TIMEOUT (msecs_to_jiffies(1000))
/*
* The following constant is used for the device global interrupt enable
* register, to enable all interrupts for the device, this is the only bit
@@ -164,7 +168,7 @@ struct xiic_i2c {
#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
-static void xiic_start_xfer(struct xiic_i2c *i2c);
+static int xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);
/*
@@ -245,17 +249,29 @@ static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
xiic_irq_en(i2c, mask);
}
-static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
+static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
u8 sr;
+ unsigned long timeout;
+
+ timeout = jiffies + XIIC_I2C_TIMEOUT;
for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
!(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
- sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
+ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) {
xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+ if (time_after(jiffies, timeout)) {
+ dev_err(i2c->dev, "Failed to clear rx fifo\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
}
-static void xiic_reinit(struct xiic_i2c *i2c)
+static int xiic_reinit(struct xiic_i2c *i2c)
{
+ int ret;
+
xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
/* Set receive Fifo depth to maximum (zero based). */
@@ -268,12 +284,16 @@ static void xiic_reinit(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
/* make sure RX fifo is empty */
- xiic_clear_rx_fifo(i2c);
+ ret = xiic_clear_rx_fifo(i2c);
+ if (ret)
+ return ret;
/* Enable interrupts */
xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
+
+ return 0;
}
static void xiic_deinit(struct xiic_i2c *i2c)
@@ -653,12 +673,18 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
}
-static void xiic_start_xfer(struct xiic_i2c *i2c)
+static int xiic_start_xfer(struct xiic_i2c *i2c)
{
+ int ret;
mutex_lock(&i2c->lock);
- xiic_reinit(i2c);
- __xiic_start_xfer(i2c);
+
+ ret = xiic_reinit(i2c);
+ if (!ret)
+ __xiic_start_xfer(i2c);
+
mutex_unlock(&i2c->lock);
+
+ return ret;
}
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
@@ -680,7 +706,11 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
i2c->tx_msg = msgs;
i2c->nmsgs = num;
- xiic_start_xfer(i2c);
+ err = xiic_start_xfer(i2c);
+ if (err < 0) {
+ dev_err(adap->dev.parent, "xiic_start_xfer failed\n");
+ goto out;
+ }
if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
(i2c->state == STATE_DONE), HZ)) {
@@ -758,7 +788,8 @@ static int xiic_i2c_probe(struct platform_device *pdev)
i2c->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "input clock not found.\n");
+ if (PTR_ERR(i2c->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(i2c->clk);
}
ret = clk_prepare_enable(i2c->clk);
@@ -767,10 +798,10 @@ static int xiic_i2c_probe(struct platform_device *pdev)
return ret;
}
i2c->dev = &pdev->dev;
- pm_runtime_enable(i2c->dev);
pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
pm_runtime_use_autosuspend(i2c->dev);
pm_runtime_set_active(i2c->dev);
+ pm_runtime_enable(i2c->dev);
ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
xiic_process, IRQF_ONESHOT,
pdev->name, i2c);
@@ -792,7 +823,11 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
i2c->endianness = BIG;
- xiic_reinit(i2c);
+ ret = xiic_reinit(i2c);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot xiic_reinit\n");
+ goto err_clk_dis;
+ }
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
@@ -824,14 +859,16 @@ static int xiic_i2c_remove(struct platform_device *pdev)
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
- ret = clk_prepare_enable(i2c->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock.\n");
+ ret = pm_runtime_get_sync(i2c->dev);
+ if (ret < 0)
return ret;
- }
+
xiic_deinit(i2c);
+ pm_runtime_put_sync(i2c->dev);
clk_disable_unprepare(i2c->clk);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return 0;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f96a7702b020..1f09c6603310 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1085,7 +1085,7 @@ config VIPERBOARD_ADC
config XILINX_XADC
tristate "Xilinx XADC driver"
- depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP || MICROBLAZE || COMPILE_TEST
depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
@@ -1096,4 +1096,14 @@ config XILINX_XADC
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
+config XILINX_AMS
+ tristate "Xilinx AMS driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to have support for the Xilinx AMS.
+
+ The driver can also be built as a module. If so, the module will be called
+ xilinx-ams.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ef9cc485fb67..bda62f310871 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -99,4 +99,5 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
new file mode 100644
index 000000000000..c21c46512d5f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -0,0 +1,1109 @@
+/*
+ * Xilinx AMS driver
+ *
+ * Licensed under the GPL-2
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/iopoll.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/io.h>
+
+#include "xilinx-ams.h"
+#include <linux/delay.h>
+
+static const unsigned int AMS_UNMASK_TIMEOUT = 500;
+
+static inline void ams_read_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ *data = readl(ams->base + offset);
+}
+
+static inline void ams_write_reg(struct ams *ams, unsigned int offset, u32 data)
+{
+ writel(data, ams->base + offset);
+}
+
+static inline void ams_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_read_reg(ams, offset, &val);
+ ams_write_reg(ams, offset, (val & ~mask) | (mask & data));
+}
+
+static inline void ams_ps_read_reg(struct ams *ams, unsigned int offset,
+ u32 *data)
+{
+ *data = readl(ams->ps_base + offset);
+}
+
+static inline void ams_ps_write_reg(struct ams *ams, unsigned int offset,
+ u32 data)
+{
+ writel(data, ams->ps_base + offset);
+}
+
+static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_ps_read_reg(ams, offset, &val);
+ ams_ps_write_reg(ams, offset, (val & ~mask) | (data & mask));
+}
+
+static inline void ams_apb_pl_read_reg(struct ams *ams, unsigned int offset,
+ u32 *data)
+{
+ *data = readl(ams->pl_base + offset);
+}
+
+static inline void ams_apb_pl_write_reg(struct ams *ams, unsigned int offset,
+ u32 data)
+{
+ writel(data, ams->pl_base + offset);
+}
+
+static inline void ams_apb_pl_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_apb_pl_read_reg(ams, offset, &val);
+ ams_apb_pl_write_reg(ams, offset, (val & ~mask) | (data & mask));
+}
+
+static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
+{
+ /* The intr_mask variable in struct ams represents bits in the AMS
+ * registers IDR0 and IDR1: the first 32 bits map to IDR0, the
+ * remaining bits to IDR1.
+ */
+ ams->intr_mask &= ~mask;
+ ams->intr_mask |= (val & mask);
+
+ ams_write_reg(ams, AMS_IER_0, ~(ams->intr_mask | ams->masked_alarm));
+ ams_write_reg(ams, AMS_IER_1,
+ ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT));
+ ams_write_reg(ams, AMS_IDR_0, ams->intr_mask | ams->masked_alarm);
+ ams_write_reg(ams, AMS_IDR_1,
+ ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
+}
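+
+/*
+ * Worked example (illustrative): masking only alarm bit 35 means calling
+ * ams_update_intrmask(ams, BIT_ULL(35), BIT_ULL(35)). Per the layout noted
+ * above, bits [63:32] are shifted down by AMS_ISR1_INTR_MASK_SHIFT before
+ * reaching the second register, so this sets bit 3 in IDR_1 and leaves the
+ * IDR_0 image unchanged.
+ */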
+
+static void iio_ams_disable_all_alarm(struct ams *ams)
+{
+ /* disable PS module alarm */
+ if (ams->ps_base) {
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+
+ /* disable PL module alarm */
+ if (ams->pl_base) {
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1,
+ AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG3,
+ AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+}
+
+static void iio_ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ u32 cfg;
+ unsigned long flags;
+ unsigned long pl_alarm_mask;
+
+ if (ams->ps_base) {
+ /* Configuring PS alarm enable */
+ cfg = ~((alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
+ AMS_CONF1_ALARM_2_TO_0_SHIFT);
+ cfg &= ~((alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
+ AMS_CONF1_ALARM_6_TO_3_SHIFT);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ cfg);
+
+ cfg = ~((alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
+ AMS_ISR0_ALARM_12_TO_7_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ cfg);
+ }
+
+ if (ams->pl_base) {
+ pl_alarm_mask = (alarm_mask >> AMS_PL_ALARM_START);
+ /* Configuring PL alarm enable */
+ cfg = ~((pl_alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
+ AMS_CONF1_ALARM_2_TO_0_SHIFT);
+ cfg &= ~((pl_alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
+ AMS_CONF1_ALARM_6_TO_3_SHIFT);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1,
+ AMS_REGCFG1_ALARM_MASK, cfg);
+
+ cfg = ~((pl_alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
+ AMS_ISR0_ALARM_12_TO_7_MASK);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG3,
+ AMS_REGCFG3_ALARM_MASK, cfg);
+ }
+
+ spin_lock_irqsave(&ams->lock, flags);
+ ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ spin_unlock_irqrestore(&ams->lock, flags);
+}
+
+static void ams_enable_channel_sequence(struct ams *ams)
+{
+ int i;
+ unsigned long long scan_mask;
+ struct iio_dev *indio_dev = iio_priv_to_dev(ams);
+
+ /* Enable channel sequence. The first 22 bits of scan_mask represent
+ * PS channels; the remaining bits represent PL channels.
+ */
+
+ /* Run calibration of PS & PL as part of the sequence */
+ scan_mask = 1 | (1 << PS_SEQ_MAX);
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT(indio_dev->channels[i].scan_index);
+
+ if (ams->ps_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ ams_ps_write_reg(ams, AMS_REG_SEQ_CH0,
+ scan_mask & AMS_REG_SEQ0_MASK);
+ ams_ps_write_reg(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
+ (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
+
+ /* set continuous sequence mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+
+ if (ams->pl_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ scan_mask = (scan_mask >> PS_SEQ_MAX);
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH0,
+ scan_mask & AMS_REG_SEQ0_MASK);
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
+ (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH1, AMS_REG_SEQ1_MASK &
+ (scan_mask >> AMS_REG_SEQ1_MASK_SHIFT));
+
+ /* set continuous sequence mode */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+}
+
+static int iio_ams_init_device(struct ams *ams)
+{
+ int ret = 0;
+ u32 reg;
+
+ /* reset AMS */
+ if (ams->ps_base) {
+ ams_ps_write_reg(ams, AMS_VP_VN, AMS_PS_RESET_VALUE);
+
+ ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg,
+ (reg & AMS_PS_CSTS_PS_READY) ==
+ AMS_PS_CSTS_PS_READY, 0,
+ AMS_INIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ if (ams->pl_base) {
+ ams->pl_bus->write(ams, AMS_VP_VN, AMS_PL_RESET_VALUE);
+
+ ret = readl_poll_timeout(ams->base + AMS_PL_CSTS, reg,
+ (reg & AMS_PL_CSTS_ACCESS_MASK) ==
+ AMS_PL_CSTS_ACCESS_MASK, 0,
+ AMS_INIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ iio_ams_disable_all_alarm(ams);
+
+ /* Disable interrupt */
+ ams_update_intrmask(ams, ~0, ~0);
+
+ /* Clear any pending interrupt */
+ ams_write_reg(ams, AMS_ISR_0, AMS_ISR0_ALARM_MASK);
+ ams_write_reg(ams, AMS_ISR_1, AMS_ISR1_ALARM_MASK);
+
+ return ret;
+}
+
+static void ams_enable_single_channel(struct ams *ams, unsigned int offset)
+{
+ u8 channel_num = 0;
+
+ switch (offset) {
+ case AMS_VCC_PSPLL0:
+ channel_num = AMS_VCC_PSPLL0_CH;
+ break;
+ case AMS_VCC_PSPLL3:
+ channel_num = AMS_VCC_PSPLL3_CH;
+ break;
+ case AMS_VCCINT:
+ channel_num = AMS_VCCINT_CH;
+ break;
+ case AMS_VCCBRAM:
+ channel_num = AMS_VCCBRAM_CH;
+ break;
+ case AMS_VCCAUX:
+ channel_num = AMS_VCCAUX_CH;
+ break;
+ case AMS_PSDDRPLL:
+ channel_num = AMS_PSDDRPLL_CH;
+ break;
+ case AMS_PSINTFPDDR:
+ channel_num = AMS_PSINTFPDDR_CH;
+ break;
+ default:
+ break;
+ }
+
+ /* set single channel, sequencer off mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_SINGLE_CHANNEL);
+
+ /* write the channel number */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
+ channel_num);
+ mdelay(1);
+}
+
+static void ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ ams_enable_single_channel(ams, offset);
+ ams_read_reg(ams, offset, data);
+ ams_enable_channel_sequence(ams);
+}
+
+static int ams_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ams->mutex);
+ if (chan->scan_index >= (PS_SEQ_MAX * 3))
+ ams_read_vcc_reg(ams, chan->address, val);
+ else if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->read(ams, chan->address, val);
+ else
+ ams_ps_read_reg(ams, chan->address, val);
+ mutex_unlock(&ams->mutex);
+
+ *val2 = 0;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ switch (chan->address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ break;
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ if (chan->scan_index < PS_SEQ_MAX)
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ break;
+ case AMS_SUPPLY7:
+ case AMS_SUPPLY8:
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ break;
+ case AMS_SUPPLY9:
+ case AMS_SUPPLY10:
+ if (chan->scan_index < PS_SEQ_MAX)
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ break;
+ default:
+ if (chan->scan_index >= (PS_SEQ_MAX * 3))
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_1VOLT;
+ break;
+ }
+ *val2 = AMS_SUPPLY_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = AMS_TEMP_SCALE;
+ *val2 = AMS_TEMP_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = AMS_TEMP_OFFSET;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
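+
+/*
+ * Illustrative IIO arithmetic (standard core convention, no extra driver
+ * code): userspace derives, e.g. for a temperature channel,
+ *
+ * temp = (in_temp_raw + in_temp_offset) * in_temp_scale
+ *
+ * where the scale is AMS_TEMP_SCALE / 2^AMS_TEMP_SCALE_DIV_BIT as encoded
+ * by IIO_VAL_FRACTIONAL_LOG2 above.
+ */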
+
+static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
+{
+ int offset = 0;
+
+ if (scan_index >= PS_SEQ_MAX)
+ scan_index -= PS_SEQ_MAX;
+
+ if (dir == IIO_EV_DIR_FALLING) {
+ if (scan_index < AMS_SEQ_SUPPLY7)
+ offset = AMS_ALARM_THRESOLD_OFF_10;
+ else
+ offset = AMS_ALARM_THRESOLD_OFF_20;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return (AMS_ALARM_TEMP + offset);
+ case AMS_SEQ_SUPPLY1:
+ return (AMS_ALARM_SUPPLY1 + offset);
+ case AMS_SEQ_SUPPLY2:
+ return (AMS_ALARM_SUPPLY2 + offset);
+ case AMS_SEQ_SUPPLY3:
+ return (AMS_ALARM_SUPPLY3 + offset);
+ case AMS_SEQ_SUPPLY4:
+ return (AMS_ALARM_SUPPLY4 + offset);
+ case AMS_SEQ_SUPPLY5:
+ return (AMS_ALARM_SUPPLY5 + offset);
+ case AMS_SEQ_SUPPLY6:
+ return (AMS_ALARM_SUPPLY6 + offset);
+ case AMS_SEQ_SUPPLY7:
+ return (AMS_ALARM_SUPPLY7 + offset);
+ case AMS_SEQ_SUPPLY8:
+ return (AMS_ALARM_SUPPLY8 + offset);
+ case AMS_SEQ_SUPPLY9:
+ return (AMS_ALARM_SUPPLY9 + offset);
+ case AMS_SEQ_SUPPLY10:
+ return (AMS_ALARM_SUPPLY10 + offset);
+ case AMS_SEQ_VCCAMS:
+ return (AMS_ALARM_VCCAMS + offset);
+ case AMS_SEQ_TEMP_REMOTE:
+ return (AMS_ALARM_TEMP_REMOTE + offset);
+ }
+
+ return 0;
+}
+
+static const struct iio_chan_spec *ams_event_to_channel(
+ struct iio_dev *indio_dev, u32 event)
+{
+ int scan_index = 0, i;
+
+ if (event >= AMS_PL_ALARM_START) {
+ event -= AMS_PL_ALARM_START;
+ scan_index = PS_SEQ_MAX;
+ }
+
+ switch (event) {
+ case AMS_ALARM_BIT_TEMP:
+ scan_index += AMS_SEQ_TEMP;
+ break;
+ case AMS_ALARM_BIT_SUPPLY1:
+ scan_index += AMS_SEQ_SUPPLY1;
+ break;
+ case AMS_ALARM_BIT_SUPPLY2:
+ scan_index += AMS_SEQ_SUPPLY2;
+ break;
+ case AMS_ALARM_BIT_SUPPLY3:
+ scan_index += AMS_SEQ_SUPPLY3;
+ break;
+ case AMS_ALARM_BIT_SUPPLY4:
+ scan_index += AMS_SEQ_SUPPLY4;
+ break;
+ case AMS_ALARM_BIT_SUPPLY5:
+ scan_index += AMS_SEQ_SUPPLY5;
+ break;
+ case AMS_ALARM_BIT_SUPPLY6:
+ scan_index += AMS_SEQ_SUPPLY6;
+ break;
+ case AMS_ALARM_BIT_SUPPLY7:
+ scan_index += AMS_SEQ_SUPPLY7;
+ break;
+ case AMS_ALARM_BIT_SUPPLY8:
+ scan_index += AMS_SEQ_SUPPLY8;
+ break;
+ case AMS_ALARM_BIT_SUPPLY9:
+ scan_index += AMS_SEQ_SUPPLY9;
+ break;
+ case AMS_ALARM_BIT_SUPPLY10:
+ scan_index += AMS_SEQ_SUPPLY10;
+ break;
+ case AMS_ALARM_BIT_VCCAMS:
+ scan_index += AMS_SEQ_VCCAMS;
+ break;
+ case AMS_ALARM_BIT_TEMP_REMOTE:
+ scan_index += AMS_SEQ_TEMP_REMOTE;
+ break;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].scan_index == scan_index)
+ break;
+
+ return &indio_dev->channels[i];
+}
+
+static int ams_get_alarm_mask(int scan_index)
+{
+ int bit = 0;
+
+ if (scan_index >= PS_SEQ_MAX) {
+ bit = AMS_PL_ALARM_START;
+ scan_index -= PS_SEQ_MAX;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return BIT(AMS_ALARM_BIT_TEMP + bit);
+ case AMS_SEQ_SUPPLY1:
+ return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
+ case AMS_SEQ_SUPPLY2:
+ return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
+ case AMS_SEQ_SUPPLY3:
+ return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
+ case AMS_SEQ_SUPPLY4:
+ return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
+ case AMS_SEQ_SUPPLY5:
+ return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
+ case AMS_SEQ_SUPPLY6:
+ return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
+ case AMS_SEQ_SUPPLY7:
+ return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
+ case AMS_SEQ_SUPPLY8:
+ return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
+ case AMS_SEQ_SUPPLY9:
+ return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
+ case AMS_SEQ_SUPPLY10:
+ return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
+ case AMS_SEQ_VCCAMS:
+ return BIT(AMS_ALARM_BIT_VCCAMS + bit);
+ case AMS_SEQ_TEMP_REMOTE:
+ return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
+ }
+
+ return 0;
+}
+
+static int ams_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ return (ams->alarm_mask & ams_get_alarm_mask(chan->scan_index)) ? 1 : 0;
+}
+
+static int ams_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ alarm = ams_get_alarm_mask(chan->scan_index);
+
+ mutex_lock(&ams->mutex);
+
+ if (state)
+ ams->alarm_mask |= alarm;
+ else
+ ams->alarm_mask &= ~alarm;
+
+ iio_ams_update_alarm(ams, ams->alarm_mask);
+
+ mutex_unlock(&ams->mutex);
+
+ return 0;
+}
+
+static int ams_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
+
+ mutex_lock(&ams->mutex);
+
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->read(ams, offset, val);
+ else
+ ams_ps_read_reg(ams, offset, val);
+
+ mutex_unlock(&ams->mutex);
+
+ *val2 = 0;
+ return IIO_VAL_INT;
+}
+
+static int ams_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset;
+
+ mutex_lock(&ams->mutex);
+
+ /* Set temperature channel threshold to direct threshold */
+ if (chan->type == IIO_TEMP) {
+ offset = ams_get_alarm_offset(chan->scan_index,
+ IIO_EV_DIR_FALLING);
+
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->update(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ else
+ ams_ps_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ }
+
+ offset = ams_get_alarm_offset(chan->scan_index, dir);
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->write(ams, offset, val);
+ else
+ ams_ps_write_reg(ams, offset, val);
+
+ mutex_unlock(&ams->mutex);
+
+ return 0;
+}
+
+static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
+{
+ const struct iio_chan_spec *chan;
+
+ chan = ams_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /* The temperature channel only supports over-temperature
+ * events
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ } else {
+ /* For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+}
+
+static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
+ ams_handle_event(indio_dev, bit);
+}
+
+/**
+ * ams_unmask_worker - ams alarm interrupt unmask worker
+ * @work : work to be done
+ *
+ * The ZynqMP threshold interrupts are level sensitive. Since we can't make a
+ * threshold condition go away from within the interrupt handler, we would
+ * re-enter the handler again and again for as long as the condition persists.
+ * To work around this we mask all active threshold interrupts in the
+ * interrupt handler and schedule delayed work. The worker polls the interrupt
+ * status and unmasks an interrupt only once it is inactive.
+ */
+static void ams_unmask_worker(struct work_struct *work)
+{
+ struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+ unsigned int status, unmask;
+
+ spin_lock_irq(&ams->lock);
+
+ ams_read_reg(ams, AMS_ISR_0, &status);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->masked_alarm ^ status) & ams->masked_alarm;
+
+ /* clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ ams_write_reg(ams, AMS_ISR_0, unmask);
+
+ ams_update_intrmask(ams, 0, 0);
+
+ spin_unlock_irq(&ams->lock);
+
+ /* If some alarm is still pending, re-schedule the work */
+ if (ams->masked_alarm)
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
+}
+
+static irqreturn_t ams_iio_irq(int irq, void *data)
+{
+ unsigned int isr0, isr1;
+ struct iio_dev *indio_dev = data;
+ struct ams *ams = iio_priv(indio_dev);
+
+ spin_lock(&ams->lock);
+
+ ams_read_reg(ams, AMS_ISR_0, &isr0);
+ ams_read_reg(ams, AMS_ISR_1, &isr1);
+
+ /* Only process alarms that are not masked */
+ isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->masked_alarm);
+ isr1 &= ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
+
+ /* clear interrupt */
+ ams_write_reg(ams, AMS_ISR_0, isr0);
+ ams_write_reg(ams, AMS_ISR_1, isr1);
+
+ if (isr0) {
+ /* Once an alarm interrupt occurs, mask it until it is cleared */
+ ams->masked_alarm |= isr0;
+ ams_update_intrmask(ams, 0, 0);
+
+ ams_handle_events(indio_dev, isr0);
+
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
+ }
+
+ spin_unlock(&ams->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_event_spec ams_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ams_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec ams_ps_channels[] = {
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "ps_temp"),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE, "remote_temp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccpsintlp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccpsintfp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccpsaux"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccpsddr"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccpsio3"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccpsio0"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vccpsio1"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vccpsio2"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "psmgtravcc"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "psmgtravtt"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams"),
+};
+
+static const struct iio_chan_spec ams_pl_channels[] = {
+ AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "pl_temp"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccint", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccaux", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, "vccvrefp", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, "vccvrefn", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccbram", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccplintlp", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccplintfp", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccplaux", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, "vccvpvn", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vuser0", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vuser1", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "vuser2", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "vuser3", true),
+ AMS_PL_AUX_CHAN_VOLTAGE(0, "vccaux0"),
+ AMS_PL_AUX_CHAN_VOLTAGE(1, "vccaux1"),
+ AMS_PL_AUX_CHAN_VOLTAGE(2, "vccaux2"),
+ AMS_PL_AUX_CHAN_VOLTAGE(3, "vccaux3"),
+ AMS_PL_AUX_CHAN_VOLTAGE(4, "vccaux4"),
+ AMS_PL_AUX_CHAN_VOLTAGE(5, "vccaux5"),
+ AMS_PL_AUX_CHAN_VOLTAGE(6, "vccaux6"),
+ AMS_PL_AUX_CHAN_VOLTAGE(7, "vccaux7"),
+ AMS_PL_AUX_CHAN_VOLTAGE(8, "vccaux8"),
+ AMS_PL_AUX_CHAN_VOLTAGE(9, "vccaux9"),
+ AMS_PL_AUX_CHAN_VOLTAGE(10, "vccaux10"),
+ AMS_PL_AUX_CHAN_VOLTAGE(11, "vccaux11"),
+ AMS_PL_AUX_CHAN_VOLTAGE(12, "vccaux12"),
+ AMS_PL_AUX_CHAN_VOLTAGE(13, "vccaux13"),
+ AMS_PL_AUX_CHAN_VOLTAGE(14, "vccaux14"),
+ AMS_PL_AUX_CHAN_VOLTAGE(15, "vccaux15"),
+};
+
+static const struct iio_chan_spec ams_ctrl_channels[] = {
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0, "vcc_pspll0"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3, "vcc_psbatt"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT, "vccint"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM, "vccbram"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX, "vccaux"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL, "vcc_psddrpll"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR, "vccpsintfpddr"),
+};
+
+static int ams_init_module(struct iio_dev *indio_dev, struct device_node *np,
+ struct iio_chan_spec *channels)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct device_node *chan_node, *child;
+ int ret, num_channels = 0;
+ unsigned int reg;
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-ams-ps")) {
+ ams->ps_base = of_iomap(np, 0);
+ if (!ams->ps_base)
+ return -ENXIO;
+
+ /* add PS channels to iio device channels */
+ memcpy(channels + num_channels, ams_ps_channels,
+ sizeof(ams_ps_channels));
+ num_channels += ARRAY_SIZE(ams_ps_channels);
+ } else if (of_device_is_compatible(np, "xlnx,zynqmp-ams-pl")) {
+ ams->pl_base = of_iomap(np, 0);
+ if (!ams->pl_base)
+ return -ENXIO;
+
+ /* Copy only the fixed PL channels */
+ memcpy(channels + num_channels, ams_pl_channels,
+ AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
+ num_channels += AMS_PL_MAX_FIXED_CHANNEL;
+
+ chan_node = of_get_child_by_name(np, "xlnx,ext-channels");
+ if (chan_node) {
+ for_each_child_of_node(chan_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg > AMS_PL_MAX_EXT_CHANNEL)
+ continue;
+
+ memcpy(&channels[num_channels],
+ &ams_pl_channels[reg +
+ AMS_PL_MAX_FIXED_CHANNEL],
+ sizeof(*channels));
+
+ if (of_property_read_bool(child,
+ "xlnx,bipolar"))
+ channels[num_channels].
+ scan_type.sign = 's';
+
+ num_channels += 1;
+ }
+ }
+ of_node_put(chan_node);
+ } else if (of_device_is_compatible(np, "xlnx,zynqmp-ams")) {
+ /* add AMS channels to iio device channels */
+ memcpy(channels + num_channels, ams_ctrl_channels,
+ sizeof(ams_ctrl_channels));
+ num_channels += ARRAY_SIZE(ams_ctrl_channels);
+ } else {
+ return -EINVAL;
+ }
+
+ return num_channels;
+}
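+
+/*
+ * Illustrative devicetree shape walked by this parser and ams_parse_dt()
+ * below (compatibles and property names come from the code; node names and
+ * the reg value are assumptions):
+ *
+ * ams {
+ * compatible = "xlnx,zynqmp-ams";
+ * ams_ps { compatible = "xlnx,zynqmp-ams-ps"; };
+ * ams_pl {
+ * compatible = "xlnx,zynqmp-ams-pl";
+ * xlnx,ext-channels {
+ * channel@0 { reg = <0>; xlnx,bipolar; };
+ * };
+ * };
+ * };
+ */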
+
+static int ams_parse_dt(struct iio_dev *indio_dev, struct platform_device *pdev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct iio_chan_spec *ams_channels, *dev_channels;
+ struct device_node *child_node = NULL, *np = pdev->dev.of_node;
+ int ret, chan_vol = 0, chan_temp = 0, i, rising_off, falling_off;
+ unsigned int num_channels = 0;
+
+ /* Initialize buffer for channel specification */
+ ams_channels = kzalloc(sizeof(ams_ps_channels) +
+ sizeof(ams_pl_channels) +
+ sizeof(ams_ctrl_channels), GFP_KERNEL);
+ if (!ams_channels)
+ return -ENOMEM;
+
+ if (of_device_is_available(np)) {
+ ret = ams_init_module(indio_dev, np, ams_channels);
+ if (ret < 0) {
+ kfree(ams_channels);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+
+ for_each_child_of_node(np, child_node) {
+ if (of_device_is_available(child_node)) {
+ ret = ams_init_module(indio_dev, child_node,
+ ams_channels + num_channels);
+ if (ret < 0) {
+ kfree(ams_channels);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ if (ams_channels[i].type == IIO_VOLTAGE)
+ ams_channels[i].channel = chan_vol++;
+ else
+ ams_channels[i].channel = chan_temp++;
+
+ if (ams_channels[i].scan_index < (PS_SEQ_MAX * 3)) {
+ /* set threshold to max and min for each channel */
+ falling_off = ams_get_alarm_offset(
+ ams_channels[i].scan_index,
+ IIO_EV_DIR_FALLING);
+ rising_off = ams_get_alarm_offset(
+ ams_channels[i].scan_index,
+ IIO_EV_DIR_RISING);
+ if (ams_channels[i].scan_index >= PS_SEQ_MAX) {
+ ams->pl_bus->write(ams, falling_off,
+ AMS_ALARM_THR_MIN);
+ ams->pl_bus->write(ams, rising_off,
+ AMS_ALARM_THR_MAX);
+ } else {
+ ams_ps_write_reg(ams, falling_off,
+ AMS_ALARM_THR_MIN);
+ ams_ps_write_reg(ams, rising_off,
+ AMS_ALARM_THR_MAX);
+ }
+ }
+ }
+
+ dev_channels = devm_kzalloc(&pdev->dev, sizeof(*dev_channels) *
+ num_channels, GFP_KERNEL);
+ if (!dev_channels) {
+ kfree(ams_channels);
+ return -ENOMEM;
+ }
+
+ memcpy(dev_channels, ams_channels,
+ sizeof(*ams_channels) * num_channels);
+ kfree(ams_channels);
+ indio_dev->channels = dev_channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static const struct iio_info iio_pl_info = {
+ .read_raw = &ams_read_raw,
+ .read_event_config = &ams_read_event_config,
+ .write_event_config = &ams_write_event_config,
+ .read_event_value = &ams_read_event_value,
+ .write_event_value = &ams_write_event_value,
+};
+
+static const struct ams_pl_bus_ops ams_pl_apb = {
+ .read = ams_apb_pl_read_reg,
+ .write = ams_apb_pl_write_reg,
+ .update = ams_apb_pl_update_reg,
+};
+
+static const struct of_device_id ams_of_match_table[] = {
+ { .compatible = "xlnx,zynqmp-ams", &ams_pl_apb },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ams_of_match_table);
+
+static int ams_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct ams *ams;
+ struct resource *res;
+ const struct of_device_id *id;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(ams_of_match_table, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ams = iio_priv(indio_dev);
+ ams->pl_bus = id->data;
+ mutex_init(&ams->mutex);
+ spin_lock_init(&ams->lock);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->name = "ams";
+
+ indio_dev->info = &iio_pl_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ams-base");
+ ams->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ams->base))
+ return PTR_ERR(ams->base);
+
+ INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);
+
+ ams->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ams->clk))
+ return PTR_ERR(ams->clk);
+ ret = clk_prepare_enable(ams->clk);
+ if (ret)
+ return ret;
+
+ ret = iio_ams_init_device(ams);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize AMS\n");
+ goto clk_disable;
+ }
+
+ ret = ams_parse_dt(indio_dev, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failure in parsing DT\n");
+ goto clk_disable;
+ }
+
+ ams_enable_channel_sequence(ams);
+
+ ams->irq = platform_get_irq_byname(pdev, "ams-irq");
+ ret = devm_request_irq(&pdev->dev, ams->irq, &ams_iio_irq, 0, "ams-irq",
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register interrupt\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return iio_device_register(indio_dev);
+
+clk_disable:
+ clk_disable_unprepare(ams->clk);
+ return ret;
+}
+
+static int ams_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ cancel_delayed_work_sync(&ams->ams_unmask_work);
+
+ /* Unregister the device */
+ iio_device_unregister(indio_dev);
+ clk_disable_unprepare(ams->clk);
+ return 0;
+}
+
+static int __maybe_unused ams_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ clk_disable_unprepare(ams->clk);
+
+ return 0;
+}
+
+static int __maybe_unused ams_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ return clk_prepare_enable(ams->clk);
+}
+
+static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
+
+static struct platform_driver ams_driver = {
+ .probe = ams_probe,
+ .remove = ams_remove,
+ .driver = {
+ .name = "ams",
+ .pm = &ams_pm_ops,
+ .of_match_table = ams_of_match_table,
+ },
+};
+module_platform_driver(ams_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>");
diff --git a/drivers/iio/adc/xilinx-ams.h b/drivers/iio/adc/xilinx-ams.h
new file mode 100644
index 000000000000..3d900a9df82f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.h
@@ -0,0 +1,278 @@
+#ifndef __XILINX_AMS_H__
+#define __XILINX_AMS_H__
+
+#define AMS_MISC_CTRL 0x000
+#define AMS_ISR_0 0x010
+#define AMS_ISR_1 0x014
+#define AMS_IMR_0 0x018
+#define AMS_IMR_1 0x01c
+#define AMS_IER_0 0x020
+#define AMS_IER_1 0x024
+#define AMS_IDR_0 0x028
+#define AMS_IDR_1 0x02c
+#define AMS_PS_CSTS 0x040
+#define AMS_PL_CSTS 0x044
+#define AMS_MON_CSTS 0x050
+
+#define AMS_VCC_PSPLL0 0x060
+#define AMS_VCC_PSPLL3 0x06C
+#define AMS_VCCINT 0x078
+#define AMS_VCCBRAM 0x07C
+#define AMS_VCCAUX 0x080
+#define AMS_PSDDRPLL 0x084
+#define AMS_PSINTFPDDR 0x09C
+
+#define AMS_VCC_PSPLL0_CH 48
+#define AMS_VCC_PSPLL3_CH 51
+#define AMS_VCCINT_CH 54
+#define AMS_VCCBRAM_CH 55
+#define AMS_VCCAUX_CH 56
+#define AMS_PSDDRPLL_CH 57
+#define AMS_PSINTFPDDR_CH 63
+
+#define AMS_REG_CONFIG0 0x100
+#define AMS_REG_CONFIG1 0x104
+#define AMS_REG_CONFIG2 0x108
+#define AMS_REG_CONFIG3 0x10C
+#define AMS_REG_CONFIG4 0x110
+#define AMS_REG_SEQ_CH0 0x120
+#define AMS_REG_SEQ_CH1 0x124
+#define AMS_REG_SEQ_CH2 0x118
+
+#define AMS_TEMP 0x000
+#define AMS_SUPPLY1 0x004
+#define AMS_SUPPLY2 0x008
+#define AMS_VP_VN 0x00c
+#define AMS_VREFP 0x010
+#define AMS_VREFN 0x014
+#define AMS_SUPPLY3 0x018
+#define AMS_SUPPLY4 0x034
+#define AMS_SUPPLY5 0x038
+#define AMS_SUPPLY6 0x03c
+#define AMS_SUPPLY7 0x200
+#define AMS_SUPPLY8 0x204
+#define AMS_SUPPLY9 0x208
+#define AMS_SUPPLY10 0x20c
+#define AMS_VCCAMS 0x210
+#define AMS_TEMP_REMOTE 0x214
+
+#define AMS_REG_VAUX(x) (0x40 + (4*(x)))
+#define AMS_REG_VUSER(x) (0x200 + (4*(x)))
+
+#define AMS_PS_RESET_VALUE 0xFFFFU
+#define AMS_PL_RESET_VALUE 0xFFFFU
+
+#define AMS_CONF0_CHANNEL_NUM_MASK (0x3f << 0)
+
+#define AMS_CONF1_SEQ_MASK (0xf << 12)
+#define AMS_CONF1_SEQ_DEFAULT (0 << 12)
+#define AMS_CONF1_SEQ_SINGLE_PASS (1 << 12)
+#define AMS_CONF1_SEQ_CONTINUOUS (2 << 12)
+#define AMS_CONF1_SEQ_SINGLE_CHANNEL (3 << 12)
+
+#define AMS_REG_SEQ0_MASK 0xFFFF
+#define AMS_REG_SEQ2_MASK 0x3F
+#define AMS_REG_SEQ1_MASK 0xFFFF
+#define AMS_REG_SEQ2_MASK_SHIFT 16
+#define AMS_REG_SEQ1_MASK_SHIFT 22
+
+#define AMS_REGCFG1_ALARM_MASK 0xF0F
+#define AMS_REGCFG3_ALARM_MASK 0x3F
+
+#define AMS_ALARM_TEMP 0x140
+#define AMS_ALARM_SUPPLY1 0x144
+#define AMS_ALARM_SUPPLY2 0x148
+#define AMS_ALARM_OT 0x14c
+
+#define AMS_ALARM_SUPPLY3 0x160
+#define AMS_ALARM_SUPPLY4 0x164
+#define AMS_ALARM_SUPPLY5 0x168
+#define AMS_ALARM_SUPPLY6 0x16c
+#define AMS_ALARM_SUPPLY7 0x180
+#define AMS_ALARM_SUPPLY8 0x184
+#define AMS_ALARM_SUPPLY9 0x188
+#define AMS_ALARM_SUPPLY10 0x18c
+#define AMS_ALARM_VCCAMS 0x190
+#define AMS_ALARM_TEMP_REMOTE 0x194
+#define AMS_ALARM_THRESOLD_OFF_10 0x10
+#define AMS_ALARM_THRESOLD_OFF_20 0x20
+
+#define AMS_ALARM_THR_DIRECT_MASK 0x01
+#define AMS_ALARM_THR_MIN 0x0000
+#define AMS_ALARM_THR_MAX 0xffff
+
+#define AMS_NO_OF_ALARMS 32
+#define AMS_PL_ALARM_START 16
+#define AMS_ISR0_ALARM_MASK 0xFFFFFFFFU
+#define AMS_ISR1_ALARM_MASK 0xE000001FU
+#define AMS_ISR1_INTR_MASK_SHIFT 32
+#define AMS_ISR0_ALARM_2_TO_0_MASK 0x07
+#define AMS_ISR0_ALARM_6_TO_3_MASK 0x78
+#define AMS_ISR0_ALARM_12_TO_7_MASK 0x3F
+#define AMS_CONF1_ALARM_2_TO_0_SHIFT 1
+#define AMS_CONF1_ALARM_6_TO_3_SHIFT 5
+#define AMS_CONF3_ALARM_12_TO_7_SHIFT 8
+
+#define AMS_PS_CSTS_PS_READY 0x08010000U
+#define AMS_PL_CSTS_ACCESS_MASK 0x00000001U
+
+#define AMS_PL_MAX_FIXED_CHANNEL 10
+#define AMS_PL_MAX_EXT_CHANNEL 20
+
+#define AMS_INIT_TIMEOUT 10000
+
+/* The following scale and offset values are derived from
+ * UG580 (v1.7), December 20, 2016.
+ */
+#define AMS_SUPPLY_SCALE_1VOLT 1000
+#define AMS_SUPPLY_SCALE_3VOLT 3000
+#define AMS_SUPPLY_SCALE_6VOLT 6000
+#define AMS_SUPPLY_SCALE_DIV_BIT 16
+
+#define AMS_TEMP_SCALE 509314
+#define AMS_TEMP_SCALE_DIV_BIT 16
+#define AMS_TEMP_OFFSET -((280230L << 16) / 509314)
+
+enum ams_alarm_bit {
+ AMS_ALARM_BIT_TEMP,
+ AMS_ALARM_BIT_SUPPLY1,
+ AMS_ALARM_BIT_SUPPLY2,
+ AMS_ALARM_BIT_SUPPLY3,
+ AMS_ALARM_BIT_SUPPLY4,
+ AMS_ALARM_BIT_SUPPLY5,
+ AMS_ALARM_BIT_SUPPLY6,
+ AMS_ALARM_BIT_RESERVED,
+ AMS_ALARM_BIT_SUPPLY7,
+ AMS_ALARM_BIT_SUPPLY8,
+ AMS_ALARM_BIT_SUPPLY9,
+ AMS_ALARM_BIT_SUPPLY10,
+ AMS_ALARM_BIT_VCCAMS,
+ AMS_ALARM_BIT_TEMP_REMOTE
+};
+
+enum ams_seq {
+ AMS_SEQ_VCC_PSPLL,
+ AMS_SEQ_VCC_PSBATT,
+ AMS_SEQ_VCCINT,
+ AMS_SEQ_VCCBRAM,
+ AMS_SEQ_VCCAUX,
+ AMS_SEQ_PSDDRPLL,
+ AMS_SEQ_INTDDR
+};
+
+enum ams_ps_pl_seq {
+ AMS_SEQ_CALIB,
+ AMS_SEQ_RSVD_1,
+ AMS_SEQ_RSVD_2,
+ AMS_SEQ_TEST,
+ AMS_SEQ_RSVD_4,
+ AMS_SEQ_SUPPLY4,
+ AMS_SEQ_SUPPLY5,
+ AMS_SEQ_SUPPLY6,
+ AMS_SEQ_TEMP,
+ AMS_SEQ_SUPPLY2,
+ AMS_SEQ_SUPPLY1,
+ AMS_SEQ_VP_VN,
+ AMS_SEQ_VREFP,
+ AMS_SEQ_VREFN,
+ AMS_SEQ_SUPPLY3,
+ AMS_SEQ_CURRENT_MON,
+ AMS_SEQ_SUPPLY7,
+ AMS_SEQ_SUPPLY8,
+ AMS_SEQ_SUPPLY9,
+ AMS_SEQ_SUPPLY10,
+ AMS_SEQ_VCCAMS,
+ AMS_SEQ_TEMP_REMOTE,
+ AMS_SEQ_MAX
+};
+
+#define AMS_SEQ(x) (AMS_SEQ_MAX + (x))
+#define AMS_VAUX_SEQ(x) (AMS_SEQ_MAX + (x))
+
+#define PS_SEQ_MAX AMS_SEQ_MAX
+#define PS_SEQ(x) (x)
+#define PL_SEQ(x) (PS_SEQ_MAX + x)
+
+#define AMS_CHAN_TEMP(_scan_index, _addr, _ext) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = ams_temp_events, \
+ .num_event_specs = ARRAY_SIZE(ams_temp_events), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _ext, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = (_alarm) ? ams_voltage_events : NULL, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .shift = 6, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+#define AMS_PS_CHAN_TEMP(_scan_index, _addr, _ext) \
+ AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr, _ext)
+#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr, _ext) \
+ AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, _ext, true)
+
+#define AMS_PL_CHAN_TEMP(_scan_index, _addr, _ext) \
+ AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr, _ext)
+#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _ext, _alarm) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _ext, _alarm)
+#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno, _ext) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_VAUX_SEQ(_auxno)), \
+ AMS_REG_VAUX(_auxno), _ext, false)
+#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr, _ext) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_VAUX_SEQ(AMS_SEQ(_scan_index))), \
+ _addr, _ext, false)
+
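
A worked expansion of the nested index macros above (editorial aside, not
part of the patch; values taken from the enums in this header, where
AMS_SEQ_MAX == PS_SEQ_MAX == 22):

/*
 * PS channels:   scan_index = PS_SEQ(x)               = x       (0..21)
 * PL channels:   scan_index = PL_SEQ(x)               = 22 + x  (22..43)
 * PL vaux:       scan_index = PL_SEQ(AMS_VAUX_SEQ(x)) = 44 + x  (44..65)
 * Control block: scan_index = PL_SEQ(AMS_VAUX_SEQ(AMS_SEQ(x)))
 *                           = 66 + x                            (66..72)
 *
 * This is why ams_parse_dt() programs alarm thresholds only for
 * scan_index < PS_SEQ_MAX * 3 (== 66): the control-block supplies
 * (vcc_pspll0, vcc_psbatt, ...) have no alarm registers.
 */
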
+struct ams {
+ void __iomem *base;
+ void __iomem *ps_base;
+ void __iomem *pl_base;
+ struct clk *clk;
+ struct device *dev;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ unsigned int alarm_mask;
+ unsigned int masked_alarm;
+ u64 intr_mask;
+ int irq;
+
+ struct delayed_work ams_unmask_work;
+ const struct ams_pl_bus_ops *pl_bus;
+};
+
+struct ams_pl_bus_ops {
+ void (*read)(struct ams *ams, unsigned int offset, unsigned int *data);
+ void (*write)(struct ams *ams, unsigned int offset, unsigned int data);
+ void (*update)(struct ams *ams, unsigned int offset, u32 mask,
+ u32 data);
+};
+
+#endif /* __XILINX_AMS_H__ */
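
Editorial aside: a minimal sketch (not part of the patch; the helper names
are hypothetical) of how the constants above combine under the usual IIO
convention processed = (raw + offset) * scale, assuming the driver reports
scale as AMS_TEMP_SCALE / 2^AMS_TEMP_SCALE_DIV_BIT:

/* Hypothetical helpers illustrating the UG580 transfer functions. */
static inline long ams_temp_raw_to_mdeg(long raw)
{
	/*
	 * (raw + AMS_TEMP_OFFSET) * AMS_TEMP_SCALE >> 16 reduces to
	 * raw * 509314 / 65536 - 280230 millidegrees C; full scale
	 * (raw == 65535) gives roughly 229 degrees C.
	 */
	return ((raw * AMS_TEMP_SCALE) >> AMS_TEMP_SCALE_DIV_BIT) - 280230;
}

static inline long ams_supply_raw_to_mv(long raw)
{
	/* For a supply channel measured on the 3 V scale. */
	return (raw * AMS_SUPPLY_SCALE_3VOLT) >> AMS_SUPPLY_SCALE_DIV_BIT;
}
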
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 3f0b88b13dd3..24ef7a52c109 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -94,6 +94,9 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_REG_IPIER 0x68
#define XADC_AXI_ADC_REG_OFFSET 0x200
+/* AXI sysmon offset */
+#define XADC_AXI_SYSMON_REG_OFFSET 0x400
+
#define XADC_AXI_RESET_MAGIC 0xa
#define XADC_AXI_GIER_ENABLE BIT(31)
@@ -468,6 +471,26 @@ static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
return 0;
}
+/* AXI sysmon read/write methods */
+static int xadc_axi_read_sysmon_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t val32;
+
+ xadc_read_reg(xadc, XADC_AXI_SYSMON_REG_OFFSET + reg * 4, &val32);
+ *val = val32 & 0xffff;
+
+ return 0;
+}
+
+static int xadc_axi_write_sysmon_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ xadc_write_reg(xadc, XADC_AXI_SYSMON_REG_OFFSET + reg * 4, val);
+
+ return 0;
+}
+
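
Editorial note (a hedged illustration, not part of the patch): the sysmon
accessors mirror the existing xadc_axi read/write pair, differing only in the
base of the register window.

/*
 * Through sysmon_axi_ops, ADC register 0x00 becomes an AXI access at
 * XADC_AXI_SYSMON_REG_OFFSET + 0x00 * 4 = 0x400, whereas the classic
 * AXI-XADC path uses the window at XADC_AXI_ADC_REG_OFFSET (0x200).
 */
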
static int xadc_axi_setup(struct platform_device *pdev,
struct iio_dev *indio_dev, int irq)
{
@@ -551,6 +574,17 @@ static const struct xadc_ops xadc_axi_ops = {
.flags = XADC_FLAGS_BUFFERED,
};
+/* AXI sysmon */
+static const struct xadc_ops sysmon_axi_ops = {
+ .read = xadc_axi_read_sysmon_reg,
+ .write = xadc_axi_write_sysmon_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+};
+
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
uint16_t mask, uint16_t val)
{
@@ -1055,23 +1089,23 @@ static const struct iio_chan_spec xadc_channels[] = {
XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
- XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
- XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
- XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
- XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
- XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
- XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
- XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
- XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
- XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
- XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
- XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
- XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
- XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
- XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
- XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
- XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
- XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+ XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, "vpvn", false),
+ XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), "vaux0", false),
+ XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), "vaux1", false),
+ XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), "vaux2", false),
+ XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), "vaux3", false),
+ XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), "vaux4", false),
+ XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), "vaux5", false),
+ XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), "vaux6", false),
+ XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), "vaux7", false),
+ XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), "vaux8", false),
+ XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), "vaux9", false),
+ XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), "vaux10", false),
+ XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), "vaux11", false),
+ XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), "vaux12", false),
+ XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), "vaux13", false),
+ XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), "vaux14", false),
+ XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), "vaux15", false),
};
static const struct iio_info xadc_info = {
@@ -1087,6 +1121,7 @@ static const struct iio_info xadc_info = {
static const struct of_device_id xadc_of_match_table[] = {
{ .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
{ .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ { .compatible = "xlnx,axi-sysmon-1.3", (void *)&sysmon_axi_ops},
{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
@@ -1095,7 +1130,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
unsigned int *conf)
{
struct xadc *xadc = iio_priv(indio_dev);
- struct iio_chan_spec *channels, *chan;
+ struct iio_chan_spec *iio_xadc_channels;
struct device_node *chan_node, *child;
unsigned int num_channels;
const char *external_mux;
@@ -1138,12 +1173,12 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
}
- channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
- if (!channels)
+ iio_xadc_channels = kmemdup(xadc_channels, sizeof(xadc_channels),
+ GFP_KERNEL);
+ if (!iio_xadc_channels)
return -ENOMEM;
num_channels = 9;
- chan = &channels[9];
chan_node = of_get_child_by_name(np, "xlnx,channels");
if (chan_node) {
@@ -1157,28 +1192,24 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
if (ret || reg > 16)
continue;
+ iio_xadc_channels[num_channels] = xadc_channels[reg + 9];
+ iio_xadc_channels[num_channels].channel = num_channels - 1;
+
if (of_property_read_bool(child, "xlnx,bipolar"))
- chan->scan_type.sign = 's';
-
- if (reg == 0) {
- chan->scan_index = 11;
- chan->address = XADC_REG_VPVN;
- } else {
- chan->scan_index = 15 + reg;
- chan->address = XADC_REG_VAUX(reg - 1);
- }
+ iio_xadc_channels[num_channels].scan_type.sign = 's';
+
num_channels++;
- chan++;
}
}
of_node_put(chan_node);
indio_dev->num_channels = num_channels;
- indio_dev->channels = krealloc(channels, sizeof(*channels) *
- num_channels, GFP_KERNEL);
+ indio_dev->channels = krealloc(iio_xadc_channels,
+ sizeof(*iio_xadc_channels) *
+ num_channels, GFP_KERNEL);
/* If we can't resize the channels array, just use the original */
if (!indio_dev->channels)
- indio_dev->channels = channels;
+ indio_dev->channels = iio_xadc_channels;
return 0;
}
@@ -1263,29 +1294,14 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
goto err_free_samplerate_trigger;
- /*
- * Make sure not to exceed the maximum samplerate since otherwise the
- * resulting interrupt storm will soft-lock the system.
- */
- if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
- ret = xadc_read_samplerate(xadc);
- if (ret < 0)
- goto err_free_samplerate_trigger;
- if (ret > XADC_MAX_SAMPLERATE) {
- ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
- if (ret < 0)
- goto err_free_samplerate_trigger;
- }
- }
-
- ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
- dev_name(&pdev->dev), indio_dev);
+ ret = devm_request_irq(&pdev->dev, irq, xadc->ops->interrupt_handler, 0,
+ dev_name(&pdev->dev), indio_dev);
if (ret)
goto err_clk_disable_unprepare;
ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@@ -1293,7 +1309,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
bipolar_mask = 0;
for (i = 0; i < indio_dev->num_channels; i++) {
@@ -1303,17 +1319,17 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
bipolar_mask >> 16);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
/* Disable all alarms */
ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
XADC_CONF1_ALARM_MASK);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
@@ -1328,7 +1344,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
xadc->threshold[i]);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
}
/* Go to non-buffered mode */
@@ -1336,16 +1352,14 @@ static int xadc_probe(struct platform_device *pdev)
ret = iio_device_register(indio_dev);
if (ret)
- goto err_free_irq;
+ goto err_clk_disable_unprepare;
platform_set_drvdata(pdev, indio_dev);
return 0;
-err_free_irq:
- free_irq(xadc->irq, indio_dev);
- cancel_delayed_work_sync(&xadc->zynq_unmask_work);
err_clk_disable_unprepare:
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
@@ -1373,7 +1387,6 @@ static int xadc_remove(struct platform_device *pdev)
iio_trigger_free(xadc->convst_trigger);
iio_triggered_buffer_cleanup(indio_dev);
}
- free_irq(xadc->irq, indio_dev);
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
kfree(xadc->data);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 65366ec59087..379488397e0b 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -265,8 +265,12 @@ config XTENSA_MX
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config XILINX_INTC
- bool
+ bool "Xilinx Interrupt Controller (IP core)"
select IRQ_DOMAIN
+ help
+ Support for the Xilinx Interrupt Controller which can be used
+ with MicroBlaze and Zynq. It is a secondary chained controller when
+ used with Zynq.
config IRQ_CROSSBAR
bool
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 50b9ca69e6a2..7e5c33834d80 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -337,6 +337,17 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return IRQ_SET_MASK_OK_DONE;
}
+
+void gic_set_cpu(unsigned int cpu, unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ struct cpumask mask;
+
+ cpumask_clear(&mask);
+ cpumask_set_cpu(cpu, &mask);
+ gic_set_affinity(d, &mask, true);
+}
+EXPORT_SYMBOL(gic_set_cpu);
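
Editorial aside: the new export simply wraps gic_set_affinity() with a
single-CPU mask. A minimal usage sketch (the IRQ number is hypothetical,
e.g. from an AMP/remoteproc setup path):

	/* Route Linux IRQ 42 exclusively to CPU 1. */
	gic_set_cpu(1, 42);
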
#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
@@ -787,18 +798,19 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
unsigned long flags, map = 0;
+#if 0
if (unlikely(nr_cpu_ids == 1)) {
/* Only one CPU? let's do a self-IPI... */
writel_relaxed(2 << 24 | irq,
gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
return;
}
-
+#endif
gic_lock_irqsave(flags);
/* Convert our logical CPU mask into a physical one. */
@@ -816,6 +828,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
gic_unlock_irqrestore(flags);
}
+EXPORT_SYMBOL(gic_raise_softirq);
#endif
#ifdef CONFIG_BL_SWITCHER
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index e3043ded8973..43d6e4e705f5 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -15,10 +15,11 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
-#include <linux/jump_label.h>
#include <linux/bug.h>
#include <linux/of_irq.h>
+static struct xintc_irq_chip *primary_intc;
+
/* No one else should require these constants, so define them locally here. */
#define ISR 0x00 /* Interrupt Status Register */
#define IPR 0x04 /* Interrupt Pending Register */
@@ -32,35 +33,40 @@
#define MER_ME (1<<0)
#define MER_HIE (1<<1)
-static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
-
struct xintc_irq_chip {
void __iomem *base;
struct irq_domain *root_domain;
u32 intr_mask;
+ struct irq_chip *intc_dev;
+ u32 nr_irq;
+ unsigned int (*read_fn)(void __iomem *addr);
+ void (*write_fn)(void __iomem *addr, u32);
};
-static struct xintc_irq_chip *xintc_irqc;
+static void xintc_write(void __iomem *addr, u32 data)
+{
+ iowrite32(data, addr);
+}
+
+static unsigned int xintc_read(void __iomem *addr)
+{
+ return ioread32(addr);
+}
-static void xintc_write(int reg, u32 data)
+static void xintc_write_be(void __iomem *addr, u32 data)
{
- if (static_branch_unlikely(&xintc_is_be))
- iowrite32be(data, xintc_irqc->base + reg);
- else
- iowrite32(data, xintc_irqc->base + reg);
+ iowrite32be(data, addr);
}
-static unsigned int xintc_read(int reg)
+static unsigned int xintc_read_be(void __iomem *addr)
{
- if (static_branch_unlikely(&xintc_is_be))
- return ioread32be(xintc_irqc->base + reg);
- else
- return ioread32(xintc_irqc->base + reg);
+ return ioread32be(addr);
}
static void intc_enable_or_unmask(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
@@ -69,47 +75,57 @@ static void intc_enable_or_unmask(struct irq_data *d)
* acks the irq before calling the interrupt handler
*/
if (irqd_is_level_type(d))
- xintc_write(IAR, mask);
+ local_intc->write_fn(local_intc->base + IAR, mask);
- xintc_write(SIE, mask);
+ local_intc->write_fn(local_intc->base + SIE, mask);
}
static void intc_disable_or_mask(struct irq_data *d)
{
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
- xintc_write(CIE, 1 << d->hwirq);
+ local_intc->write_fn(local_intc->base + CIE, 1 << d->hwirq);
}
static void intc_ack(struct irq_data *d)
{
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
+
pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
- xintc_write(IAR, 1 << d->hwirq);
+ local_intc->write_fn(local_intc->base + IAR, 1 << d->hwirq);
}
static void intc_mask_ack(struct irq_data *d)
{
unsigned long mask = 1 << d->hwirq;
+ struct xintc_irq_chip *local_intc = irq_data_get_irq_chip_data(d);
pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
- xintc_write(CIE, mask);
- xintc_write(IAR, mask);
+ local_intc->write_fn(local_intc->base + CIE, mask);
+ local_intc->write_fn(local_intc->base + IAR, mask);
}
-static struct irq_chip intc_dev = {
- .name = "Xilinx INTC",
- .irq_unmask = intc_enable_or_unmask,
- .irq_mask = intc_disable_or_mask,
- .irq_ack = intc_ack,
- .irq_mask_ack = intc_mask_ack,
-};
+static unsigned int xintc_get_irq_local(struct xintc_irq_chip *local_intc)
+{
+ int hwirq, irq = -1;
+
+ hwirq = local_intc->read_fn(local_intc->base + IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(local_intc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
unsigned int xintc_get_irq(void)
{
- unsigned int hwirq, irq = -1;
+ int hwirq, irq = -1;
- hwirq = xintc_read(IVR);
+ hwirq = primary_intc->read_fn(primary_intc->base + IVR);
if (hwirq != -1U)
- irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+ irq = irq_find_mapping(primary_intc->root_domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
@@ -118,15 +134,18 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- if (xintc_irqc->intr_mask & (1 << hw)) {
- irq_set_chip_and_handler_name(irq, &intc_dev,
+ struct xintc_irq_chip *local_intc = d->host_data;
+
+ if (local_intc->intr_mask & (1 << hw)) {
+ irq_set_chip_and_handler_name(irq, local_intc->intc_dev,
handle_edge_irq, "edge");
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
- irq_set_chip_and_handler_name(irq, &intc_dev,
+ irq_set_chip_and_handler_name(irq, local_intc->intc_dev,
handle_level_irq, "level");
irq_set_status_flags(irq, IRQ_LEVEL);
}
+ irq_set_chip_data(irq, local_intc);
return 0;
}
@@ -138,11 +157,13 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xintc_irq_chip *local_intc =
+ irq_data_get_irq_handler_data(&desc->irq_data);
u32 pending;
chained_irq_enter(chip, desc);
do {
- pending = xintc_get_irq();
+ pending = xintc_get_irq_local(local_intc);
if (pending == -1U)
break;
generic_handle_irq(pending);
@@ -153,28 +174,20 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
- u32 nr_irq;
int ret, irq;
struct xintc_irq_chip *irqc;
-
- if (xintc_irqc) {
- pr_err("irq-xilinx: Multiple instances aren't supported\n");
- return -EINVAL;
- }
+ struct irq_chip *intc_dev;
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
if (!irqc)
return -ENOMEM;
-
- xintc_irqc = irqc;
-
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
- ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &irqc->nr_irq);
if (ret < 0) {
pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
- goto err_alloc;
+ goto error;
}
ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
@@ -183,30 +196,45 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
irqc->intr_mask = 0;
}
- if (irqc->intr_mask >> nr_irq)
+ if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
- intc, nr_irq, irqc->intr_mask);
+ intc, irqc->nr_irq, irqc->intr_mask);
+
+ intc_dev = kzalloc(sizeof(*intc_dev), GFP_KERNEL);
+ if (!intc_dev) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ intc_dev->name = intc->full_name;
+ intc_dev->irq_unmask = intc_enable_or_unmask;
+ intc_dev->irq_mask = intc_disable_or_mask;
+ intc_dev->irq_ack = intc_ack;
+ intc_dev->irq_mask_ack = intc_mask_ack;
+ irqc->intc_dev = intc_dev;
+ irqc->write_fn = xintc_write;
+ irqc->read_fn = xintc_read;
/*
* Disable all external interrupts until they are
* explicity requested.
*/
- xintc_write(IER, 0);
+ irqc->write_fn(irqc->base + IER, 0);
/* Acknowledge any pending interrupts just in case. */
- xintc_write(IAR, 0xffffffff);
+ irqc->write_fn(irqc->base + IAR, 0xffffffff);
/* Turn on the Master Enable. */
- xintc_write(MER, MER_HIE | MER_ME);
- if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
- static_branch_enable(&xintc_is_be);
- xintc_write(MER, MER_HIE | MER_ME);
+ irqc->write_fn(irqc->base + MER, MER_HIE | MER_ME);
+ if (!(irqc->read_fn(irqc->base + MER) & (MER_HIE | MER_ME))) {
+ irqc->write_fn = xintc_write_be;
+ irqc->read_fn = xintc_read_be;
+ irqc->write_fn(irqc->base + MER, MER_HIE | MER_ME);
}
- irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
if (!irqc->root_domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
@@ -225,13 +253,16 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
goto err_alloc;
}
} else {
- irq_set_default_host(irqc->root_domain);
+ primary_intc = irqc;
+ irq_set_default_host(primary_intc->root_domain);
}
return 0;
err_alloc:
- xintc_irqc = NULL;
+ kfree(intc_dev);
+error:
+ iounmap(irqc->base);
kfree(irqc);
return ret;
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 2ad6bdf1a9fc..94c1ebcc6de8 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -52,6 +52,7 @@ MODULE_LICENSE("GPL v2");
#define ADV7511_MAX_HEIGHT 1200
#define ADV7511_MIN_PIXELCLOCK 20000000
#define ADV7511_MAX_PIXELCLOCK 225000000
+#define XYLON_LOGICVC_INTG
#define ADV7511_MAX_ADDRS (3)
@@ -79,7 +80,50 @@ struct adv7511_state_edid {
bool complete;
};
+struct adv7511_in_params {
+ uint8_t input_id;
+ uint8_t input_style;
+ uint8_t input_color_depth;
+ uint8_t bit_justification;
+ uint8_t hsync_polarity;
+ uint8_t vsync_polarity;
+ uint8_t clock_delay;
+};
+
+struct adv7511_csc_coeff {
+ uint16_t a1;
+ uint16_t a2;
+ uint16_t a3;
+ uint16_t a4;
+ uint16_t b1;
+ uint16_t b2;
+ uint16_t b3;
+ uint16_t b4;
+ uint16_t c1;
+ uint16_t c2;
+ uint16_t c3;
+ uint16_t c4;
+};
+
+struct adv7511_out_params {
+ bool hdmi_mode;
+ uint8_t output_format;
+ uint8_t output_color_space;
+ uint8_t up_conversion;
+ uint8_t csc_enable;
+ uint8_t csc_scaling_factor;
+ struct adv7511_csc_coeff csc_coeff;
+};
+
+struct adv7511_config {
+ struct adv7511_in_params in_params;
+ struct adv7511_out_params out_params;
+ bool embedded_sync;
+ bool loaded;
+};
+
struct adv7511_state {
+ struct adv7511_config cfg;
struct adv7511_platform_data pdata;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -379,6 +423,10 @@ static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l
{
struct adv7511_state *state = get_adv7511_state(sd);
+#ifdef XYLON_LOGICVC_INTG
+ return;
+#endif
+
/* Only makes sense for RGB formats */
if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
/* so just keep quantization */
@@ -1527,34 +1575,278 @@ static void adv7511_audio_setup(struct v4l2_subdev *sd)
adv7511_s_routing(sd, 0, 0, 0);
}
-/* Configure hdmi transmitter. */
-static void adv7511_setup(struct v4l2_subdev *sd)
+static void adv7511_set_ofdt_config(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_config *config = &state->cfg;
+ uint8_t val_mask, val;
v4l2_dbg(1, debug, sd, "%s\n", __func__);
- /* Input format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
- /* Output format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
- /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
- adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
- /* Disable pixel repetition */
- adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
- /* Disable CSC */
- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
- /* Output format: RGB 4:4:4, Active Format Information is valid,
- * underscanned */
- adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
- /* AVI Info frame packet enable, Audio Info frame disable */
+ /* Input format */
+ val_mask = 0;
+ switch (config->in_params.input_id) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ config->embedded_sync = true;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ case 4:
+ val = 0x04;
+ config->embedded_sync = true;
+ break;
+ case 5:
+ val = 0x05;
+ break;
+ case 6:
+ val = 0x06;
+ break;
+ case 7:
+ val = 0x07;
+ break;
+ case 8:
+ val = 0x08;
+ config->embedded_sync = true;
+ break;
+ }
+ val_mask |= val;
+ adv7511_wr(sd, 0x15, val_mask);
+
+ /* Output format */
+ val_mask = 0;
+ switch (config->out_params.output_color_space) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 0);
+ switch (config->in_params.input_style) {
+ case 1:
+ val = 0x02;
+ break;
+ case 2:
+ val = 0x01;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ default:
+ val = 0x00;
+ break;
+ }
+ val_mask |= (val << 2);
+ switch (config->in_params.input_color_depth) {
+ case 8:
+ val = 0x03;
+ break;
+ case 10:
+ val = 0x01;
+ break;
+ case 12:
+ val = 0x02;
+ break;
+ default:
+ val = 0x00;
+ break;
+ }
+ val_mask |= (val << 4);
+ switch (config->out_params.output_format) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 7);
+ adv7511_wr(sd, 0x16, val_mask);
+
+ /* H, V sync polarity, interpolation style */
+ val_mask = 0;
+ switch (config->out_params.up_conversion) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 2);
+ switch (config->in_params.hsync_polarity) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 5);
+ switch (config->in_params.vsync_polarity) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 6);
+ adv7511_wr(sd, 0x17, val_mask);
+
+ /* CSC mode, CSC coefficients */
+ if (config->out_params.csc_enable) {
+ switch (config->out_params.csc_scaling_factor) {
+ case 1:
+ val = 0x00;
+ break;
+ case 2:
+ val = 0x01;
+ break;
+ case 4:
+ default:
+ val = 0x02;
+ break;
+ }
+ adv7511_csc_conversion_mode(sd, val);
+ adv7511_csc_coeff(sd,
+ config->out_params.csc_coeff.a1,
+ config->out_params.csc_coeff.a2,
+ config->out_params.csc_coeff.a3,
+ config->out_params.csc_coeff.a4,
+ config->out_params.csc_coeff.b1,
+ config->out_params.csc_coeff.b2,
+ config->out_params.csc_coeff.b3,
+ config->out_params.csc_coeff.b4,
+ config->out_params.csc_coeff.c1,
+ config->out_params.csc_coeff.c2,
+ config->out_params.csc_coeff.c3,
+ config->out_params.csc_coeff.c4);
+ /* enable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
+ /* AVI infoframe: Limited range RGB (16-235) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
+ }
+
+ /* AVI Info, Audio Info */
adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
- /* Colorimetry, Active format aspect ratio: same as picure. */
- adv7511_wr(sd, 0x56, 0xa8);
- /* No encryption */
- adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
- /* Positive clk edge capture for input video clock */
- adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+ /* Video input justification */
+ val_mask = 0;
+ switch (config->in_params.bit_justification) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ break;
+ }
+ val_mask |= (val << 3);
+ adv7511_wr(sd, 0x48, val_mask);
+
+ /* Output format */
+ val_mask = 0x00;
+ if (config->out_params.output_format == 1) {
+ if (config->out_params.output_color_space == 0)
+ val_mask = 0x02;
+ else if (config->out_params.output_color_space == 1)
+ val_mask = 0x01;
+ }
+ val_mask <<= 5;
+ adv7511_wr(sd, 0x55, val_mask);
+
+ /* Picture format aspect ratio */
+ adv7511_wr(sd, 0x56, 0x28);
+
+ /* HDCP, Frame encryption, HDMI/DVI */
+ val_mask = 0x04;
+ if (config->out_params.hdmi_mode)
+ val_mask |= 0x02;
+ adv7511_wr(sd, 0xaf, val_mask);
+
+ /* Capture for input video clock */
+ val_mask = 0;
+ switch (config->in_params.clock_delay) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ case 4:
+ val = 0x04;
+ break;
+ case 5:
+ val = 0x05;
+ break;
+ case 6:
+ val = 0x06;
+ break;
+ case 7:
+ val = 0x07;
+ break;
+ }
+ val_mask |= (val << 5);
+ adv7511_wr_and_or(sd, 0xba, 0x1f, val_mask);
+}
+
+/* Configure hdmi transmitter. */
+static void adv7511_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ if (!state->cfg.loaded) {
+ /* Input format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
+ /* Output format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
+ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
+ adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
+ /* Disable pixel repetition */
+ adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
+ /* Disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* Output format: RGB 4:4:4, Active Format Information is valid,
+ * underscanned */
+ adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
+ /* AVI Info frame packet enable, Audio Info frame disable */
+ adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
+ /* Colorimetry, Active format aspect ratio: same as picture. */
+ adv7511_wr(sd, 0x56, 0xa8);
+ /* No encryption */
+ adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
+
+ /* Positive clk edge capture for input video clock */
+ adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+ } else {
+ adv7511_set_ofdt_config(sd);
+ }
adv7511_audio_setup(sd);
@@ -1792,6 +2084,181 @@ static void adv7511_init_setup(struct v4l2_subdev *sd)
adv7511_cec_write(sd, 0x4e, ratio << 2);
}
+
+static void adv7511_get_ofdt_config(struct i2c_client *client,
+ struct adv7511_state *state)
+{
+ struct device_node *dn = client->dev.of_node;
+ struct device_node *np;
+ struct adv7511_config *config = &state->cfg;
+ u32 const *prop;
+ int size;
+ bool vin_loaded, vout_loaded;
+
+ vin_loaded = vout_loaded = false;
+
+ prop = of_get_property(dn, "edid-addr", &size);
+ if (prop)
+ state->pdata.i2c_edid = (uint8_t)be32_to_cpup(prop);
+
+ prop = of_get_property(dn, "pktmem-addr", &size);
+ if (prop)
+ state->pdata.i2c_pktmem = (uint8_t)be32_to_cpup(prop);
+
+ prop = of_get_property(dn, "cec-addr", &size);
+ if (prop)
+ state->pdata.i2c_cec = (uint8_t)be32_to_cpup(prop);
+
+ np = of_find_node_by_name(dn, "video-input");
+ if (np) {
+ prop = of_get_property(np, "input-id", &size);
+ if (prop)
+ config->in_params.input_id =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "input-style", &size);
+ if (prop)
+ config->in_params.input_style =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "input-color-depth", &size);
+ if (prop)
+ config->in_params.input_color_depth =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "bit-justification", &size);
+ if (prop)
+ config->in_params.bit_justification =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "hsync-polarity", &size);
+ if (prop)
+ config->in_params.hsync_polarity =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "vsync-polarity", &size);
+ if (prop)
+ config->in_params.vsync_polarity =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "clock-delay", &size);
+ if (prop)
+ config->in_params.clock_delay =
+ (uint8_t)be32_to_cpup(prop);
+ vin_loaded = true;
+ } else {
+ pr_info("No video input configuration, using device default\n");
+ }
+
+ np = of_find_node_by_name(dn, "video-output");
+ if (np) {
+ prop = of_get_property(np, "hdmi-mode", &size);
+ if (prop) {
+ if (be32_to_cpup(prop) == 1)
+ config->out_params.hdmi_mode = true;
+ }
+ prop = of_get_property(np, "output-format", &size);
+ if (prop)
+ config->out_params.output_format =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "output-color-space", &size);
+ if (prop)
+ config->out_params.output_color_space =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "up-conversion", &size);
+ if (prop)
+ config->out_params.up_conversion =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "csc-enable", &size);
+ if (prop)
+ config->out_params.csc_enable =
+ (uint8_t)be32_to_cpup(prop);
+ if (config->out_params.csc_enable) {
+ prop = of_get_property(np, "csc-scaling-factor", &size);
+ if (prop) {
+ config->out_params.csc_scaling_factor =
+ (uint8_t)be32_to_cpup(prop);
+ }
+ np = of_find_node_by_name(dn, "csc-coefficients");
+ if (np) {
+ prop = of_get_property(np, "a1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ } else {
+ pr_info("No CSC coefficients, using default\n");
+ }
+ }
+ vout_loaded = true;
+ } else {
+ pr_info("No video output configuration, using device default\n");
+ }
+
+ if (vin_loaded && vout_loaded)
+ config->loaded = true;
+}
+
+struct v4l2_subdev *adv7511_subdev(struct v4l2_subdev *sd)
+{
+ static struct v4l2_subdev *subdev;
+
+ if (sd)
+ subdev = sd;
+
+ return subdev;
+}
+EXPORT_SYMBOL(adv7511_subdev);
+
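
Editorial aside: the accessor above doubles as setter (probe passes the
freshly initialised subdev) and getter (any later caller passes NULL). A
hedged consumer sketch, assuming the adv7511 has already probed:

	struct v4l2_subdev *sd = adv7511_subdev(NULL);

	if (sd)
		v4l2_subdev_call(sd, video, s_stream, 1);
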
static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct adv7511_state *state;
@@ -1809,11 +2276,17 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
if (!state)
return -ENOMEM;
- /* Platform data */
- if (!pdata) {
- v4l_err(client, "No platform data!\n");
- return -ENODEV;
+ if (client->dev.of_node) {
+ adv7511_get_ofdt_config(client, state);
+ } else {
+ /* Platform data */
+ if (!pdata) {
+ v4l_err(client, "No platform data!\n");
+ return -ENODEV;
+ }
+ memcpy(&state->pdata, pdata, sizeof(state->pdata));
}
+
- memcpy(&state->pdata, pdata, sizeof(state->pdata));
state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
state->colorspace = V4L2_COLORSPACE_SRGB;
@@ -1824,6 +2297,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
client->addr << 1);
v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+ adv7511_subdev(sd);
sd->internal_ops = &adv7511_int_ops;
hdl = &state->hdl;
@@ -1917,7 +2391,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
+#ifndef XYLON_LOGICVC_INTG
adv7511_init_setup(sd);
+#endif
#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
@@ -1931,8 +2407,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
#endif
adv7511_set_isr(sd, true);
+#ifndef XYLON_LOGICVC_INTG
adv7511_check_monitor_present_status(sd);
-
+#endif
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
return 0;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 7fb8cd22101e..a70870a2d07f 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -44,6 +44,7 @@
#define OV5640_REG_SC_PLL_CTRL1 0x3035
#define OV5640_REG_SC_PLL_CTRL2 0x3036
#define OV5640_REG_SC_PLL_CTRL3 0x3037
+#define OV5640_REG_SC_PLLS_CTRL3 0x303d
#define OV5640_REG_SLAVE_ID 0x3100
#define OV5640_REG_SCCB_SYS_CTRL1 0x3103
#define OV5640_REG_SYS_ROOT_DIVIDER 0x3108
@@ -85,6 +86,7 @@
#define OV5640_REG_POLARITY_CTRL00 0x4740
#define OV5640_REG_MIPI_CTRL00 0x4800
#define OV5640_REG_DEBUG_MODE 0x4814
+#define OV5640_REG_PCLK_PERIOD 0x4837
#define OV5640_REG_ISP_FORMAT_MUX_CTRL 0x501f
#define OV5640_REG_PRE_ISP_TEST_SET1 0x503d
#define OV5640_REG_SDE_CTRL0 0x5580
@@ -132,6 +134,8 @@ static const struct ov5640_pixfmt ov5640_formats[] = {
{ MEDIA_BUS_FMT_JPEG_1X8, V4L2_COLORSPACE_JPEG, },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
@@ -183,6 +187,7 @@ struct reg_value {
struct ov5640_mode_info {
enum ov5640_mode_id id;
enum ov5640_downsize_mode dn_mode;
+ bool scaler; /* Mode uses the ISP scaler (reg 0x5001, BIT(5) set) */
u32 hact;
u32 htot;
u32 vact;
@@ -302,7 +307,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
{0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
{0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x482a, 0x06, 0, 0},
{0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
{0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
{0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
@@ -541,7 +546,7 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
/* power-on sensor init reg table */
static const struct ov5640_mode_info ov5640_mode_init_data = {
- 0, SUBSAMPLING, 640, 1896, 480, 984,
+ 0, SUBSAMPLING, 0, 640, 1896, 480, 984,
ov5640_init_setting_30fps_VGA,
ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
};
@@ -1066,6 +1071,222 @@ static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor,
return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact);
}
+/*
+ *
+ * The current best guess of the clock tree, as reverse engineered by several
+ * people on the media mailing list:
+ *
+ * +--------------+
+ * | Ext. Clock |
+ * +------+-------+
+ * |
+ * +------+-------+ - reg 0x3037[3:0] for the pre-divider
+ * | System PLL | - reg 0x3036 for the multiplier
+ * +--+-----------+ - reg 0x3035[7:4] for the system divider
+ * |
+ * | +--------------+
+ * |---+ MIPI Rate | - reg 0x3035[3:0] for the MIPI root divider
+ * | +--------------+
+ * |
+ * +--+-----------+
+ * | PLL Root Div | - (reg 0x3037[4])+1 for the root divider
+ * +--+-----------+
+ * |
+ * +------+-------+
+ * | MIPI Bit Div | - reg 0x3034[3:0]/4 for divider when in MIPI mode, else 1
+ * +--+-----------+
+ * |
+ * | +--------------+
+ * |---+ SCLK | - log2(reg 0x3108[1:0]) for the root divider
+ * | +--------------+
+ * |
+ * +--+-----------+ - reg 0x3035[3:0] for the MIPI root divider
+ * | PCLK | - log2(reg 0x3108[5:4]) for the DVP root divider
+ * +--------------+
+ *
+ * Not all limitations of register values are documented above, see ov5640
+ * datasheet.
+ *
+ * For the sensor to operate correctly, the ratio of
+ * SCLK:PCLK:MIPI RATE must be 1:2:8 when the scaler in the ISP is not
+ * enabled, and 1:1:4 when it is enabled (the MIPI rate doesn't matter in
+ * DVP mode). The ratio of these clocks is maintained by the constant div
+ * values below, with the PCLK div selected based on whether the mode uses
+ * the scaler.
+ */
+
+/*
+ * This is supposed to range from 1 to 16, but the value is
+ * always set to either 1 or 2 in the vendor kernels.
+ */
+#define OV5640_SYSDIV_MIN 1
+#define OV5640_SYSDIV_MAX 12
+
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 3 in the vendor kernels.
+ */
+#define OV5640_PLL_PREDIV 2
+
+/*
+ * This is supposed to range from 4 to 252, but the value must be even
+ * when above 127.
+ */
+#define OV5640_PLL_MULT_MIN 4
+#define OV5640_PLL_MULT_MAX 252
+
+/*
+ * This is supposed to range from 1 to 2, but the value is always
+ * set to 1 in the vendor kernels.
+ */
+#define OV5640_PLL_DVP_ROOT_DIV 1
+#define OV5640_PLL_MIPI_ROOT_DIV 2
+
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 2 in the vendor kernels.
+ */
+#define OV5640_SCLK_ROOT_DIV 2
+
+/*
+ * This is equal to the MIPI bit rate divided by 4. It is currently
+ * hardcoded to work only with 8-bit formats, so this value will need
+ * to be set in software if support for 10-bit formats is added. The
+ * bit divider is only active in MIPI mode (not DVP).
+ */
+#define OV5640_BIT_DIV 2
+
+static unsigned long ov5640_compute_sclk(struct ov5640_dev *sensor,
+ u8 sys_div, u8 pll_prediv,
+ u8 pll_mult, u8 pll_div,
+ u8 sclk_div)
+{
+ unsigned long rate = clk_get_rate(sensor->xclk);
+
+ rate = rate / pll_prediv * pll_mult / sys_div / pll_div;
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+ rate = rate / OV5640_BIT_DIV;
+
+ return rate / sclk_div;
+}
+
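
Editorial aside: a worked instance of the computation above with assumed
figures (24 MHz xclk, sysdiv 5, pll_mult 70, MIPI mode):

/*
 * 24 MHz / 2 (OV5640_PLL_PREDIV) * 70 (pll_mult) = 840 MHz PLL
 * 840 MHz / 5 (sysdiv) / 2 (MIPI pll_rdiv)       =  84 MHz
 *  84 MHz / 2 (OV5640_BIT_DIV, CSI-2 only)       =  42 MHz
 *  42 MHz / 2 (OV5640_SCLK_ROOT_DIV)             =  21 MHz SCLK
 *
 * For a non-scaler mode, ov5640_set_sclk() below then derives
 * pclk = sclk * sclk_rdiv / mipi_div = 21 * 2 / 1 = 42 MHz, preserving
 * the 1:2 SCLK:PCLK ratio described in the clock-tree comment.
 */
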
+static unsigned long ov5640_calc_sclk(struct ov5640_dev *sensor,
+ unsigned long rate,
+ u8 *sysdiv, u8 *prediv, u8 pll_rdiv,
+ u8 *mult, u8 *sclk_rdiv)
+{
+ unsigned long best = ~0;
+ u8 best_sysdiv = 1, best_mult = 1;
+ u8 _sysdiv, _pll_mult;
+
+ for (_sysdiv = OV5640_SYSDIV_MIN;
+ _sysdiv <= OV5640_SYSDIV_MAX;
+ _sysdiv++) {
+ for (_pll_mult = OV5640_PLL_MULT_MIN;
+ _pll_mult <= OV5640_PLL_MULT_MAX;
+ _pll_mult++) {
+ unsigned long _rate;
+
+ /*
+ * The PLL multiplier cannot be odd if above
+ * 127.
+ */
+ if (_pll_mult > 127 && (_pll_mult % 2))
+ continue;
+
+ _rate = ov5640_compute_sclk(sensor, _sysdiv,
+ OV5640_PLL_PREDIV,
+ _pll_mult,
+ pll_rdiv,
+ OV5640_SCLK_ROOT_DIV);
+
+ if (abs(rate - _rate) < abs(rate - best)) {
+ best = _rate;
+ best_sysdiv = _sysdiv;
+ best_mult = _pll_mult;
+ }
+
+ if (_rate == rate)
+ goto out;
+ if (_rate > rate)
+ break;
+ }
+ }
+
+out:
+ *sysdiv = best_sysdiv;
+ *prediv = OV5640_PLL_PREDIV;
+ *mult = best_mult;
+ *sclk_rdiv = OV5640_SCLK_ROOT_DIV;
+ return best;
+}
+
+static int ov5640_set_sclk(struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ u8 sysdiv, prediv, mult, pll_rdiv, sclk_rdiv, mipi_div, pclk_div;
+ u8 pclk_period;
+ int ret;
+ unsigned long sclk, rate, pclk;
+ unsigned char bpp;
+
+ /*
+ * All the formats we support have 2 bytes per pixel, except for JPEG
+ * which is 1 byte per pixel.
+ */
+ bpp = sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8 ? 1 : 2;
+ rate = mode->vtot * mode->htot * bpp;
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
+ rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
+
+ pll_rdiv = (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) ?
+ OV5640_PLL_MIPI_ROOT_DIV : OV5640_PLL_DVP_ROOT_DIV;
+
+ sclk = ov5640_calc_sclk(sensor, rate, &sysdiv, &prediv, pll_rdiv,
+ &mult, &sclk_rdiv);
+
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ mipi_div = (sensor->current_mode->scaler) ? 2 : 1;
+ pclk_div = 1;
+
+ /*
+ * Calculate pclk period * number of CSI2 lanes in ns for MIPI
+ * timing.
+ */
+ pclk = sclk * sclk_rdiv / mipi_div;
+ pclk_period = (u8)((1000000000UL + pclk / 2UL) / pclk);
+ pclk_period = pclk_period *
+ sensor->ep.bus.mipi_csi2.num_data_lanes;
+ ret = ov5640_write_reg(sensor, OV5640_REG_PCLK_PERIOD,
+ pclk_period);
+ if (ret)
+ return ret;
+ } else {
+ mipi_div = 1;
+ pclk_div = (sensor->current_mode->scaler) ? 2 : 1;
+ }
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
+ 0xff, (sysdiv << 4) | (mipi_div & 0x0f));
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL2,
+ 0xff, mult);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
+ 0x1f, prediv | ((pll_rdiv - 1) << 4));
+ if (ret)
+ return ret;
+
+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3F,
+ (ilog2(pclk_div) << 4) |
+ (ilog2(sclk_rdiv / 2) << 2) |
+ ilog2(sclk_rdiv));
+}
+
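
Editorial aside: continuing the 42 MHz pclk example on an assumed 2-lane
CSI-2 link, the MIPI timing value computed above works out as follows:

/*
 * pclk_period = (1000000000 + pclk / 2) / pclk
 *             = round(1e9 / 42e6) = 24 ns
 * 24 ns * 2 data lanes = 48, the value written to
 * OV5640_REG_PCLK_PERIOD (0x4837).
 */
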
/* download ov5640 settings to sensor through i2c */
static int ov5640_set_timings(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
@@ -1448,6 +1669,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
light_freq = 50;
} else {
/* 60Hz */
+ light_freq = 60;
}
}
@@ -1665,6 +1887,11 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
if (ret < 0)
return ret;
+ /* Set PLL registers for new mode */
+ ret = ov5640_set_sclk(sensor, mode);
+ if (ret < 0)
+ return ret;
+
/* Write capture setting */
ret = ov5640_load_regs(sensor, mode);
if (ret < 0)
@@ -1786,9 +2013,16 @@ static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
static int ov5640_set_mode_direct(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
{
+ int ret;
+
if (!mode->reg_data)
return -EINVAL;
+ /* Set PLL registers for new mode */
+ ret = ov5640_set_sclk(sensor, mode);
+ if (ret < 0)
+ return ret;
+
/* Write capture setting */
return ov5640_load_regs(sensor, mode);
}
@@ -2252,11 +2486,13 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor,
switch (format->code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
/* YUV422, UYVY */
fmt = 0x3f;
mux = OV5640_FMT_MUX_YUV422;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
/* YUV422, YUYV */
fmt = 0x30;
mux = OV5640_FMT_MUX_YUV422;
@@ -2572,6 +2808,13 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
/* v4l2_ctrl_lock() locks our own mutex */
+ /*
+ * If the sensor is not powered up by the host driver, do
+ * not try to access it to update the volatile controls.
+ */
+ if (sensor->power_count == 0)
+ return 0;
+
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
val = ov5640_get_gain(sensor);
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 7c429ce98bae..7457f6fd4a85 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -264,7 +264,36 @@ static struct media_entity *stack_pop(struct media_graph *graph)
#define stack_top(en) ((en)->stack[(en)->top].entity)
/**
- * media_graph_walk_init - Allocate resources for graph walk
+ * media_entity_has_route - Check if two entity pads are connected internally
+ * @entity: The entity
+ * @pad0: The first pad index
+ * @pad1: The second pad index
+ *
+ * This function can be used to check whether two pads of an entity are
+ * connected internally in the entity.
+ *
+ * The caller must hold the media device's graph_mutex.
+ *
+ * Return: true if the pads are connected internally and false otherwise.
+ */
+bool media_entity_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
+ return false;
+
+ if (pad0 == pad1)
+ return true;
+
+ if (!entity->ops || !entity->ops->has_route)
+ return true;
+
+ return entity->ops->has_route(entity, pad0, pad1);
+}
+EXPORT_SYMBOL_GPL(media_entity_has_route);
+
+/**
+ * media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Media device
*
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index a2773ad7c185..f21796d175a2 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -4,23 +4,153 @@ config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
+ select XILINX_FRMBUF
select V4L2_FWNODE
help
Driver for Xilinx Video IP Pipelines
if VIDEO_XILINX
+config VIDEO_XILINX_AXI4S_SWITCH
+ tristate "Xilinx AXI4-Stream Video Switch"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx AXI4-Stream Video Switch. This is a
+	  V4L2 sub-device based driver. It supports fixed (TDEST based)
+	  as well as dynamic (control register based) routing.
+	  Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_CFA
+ tristate "Xilinx Video Color Filter Array"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Color Filter Array
+
+config VIDEO_XILINX_CRESAMPLE
+ tristate "Xilinx Video Chroma Resampler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Chroma Resampler
+
+config VIDEO_XILINX_DEMOSAIC
+ tristate "Xilinx Video Demosaic IP"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Video Demosaic IP. This is a V4L2
+	  sub-device based driver for the Demosaic IP that takes a Bayer
+	  video stream as input and generates an RGB video output.
+	  Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_GAMMA
+ tristate "Xilinx Gamma Correction LUT"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Gamma Correction LUT IP. This is a V4L2
+	  sub-device based driver that exposes V4L2 controls to adjust the
+	  Red, Blue and Green Gamma Correction.
+
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_HLS
+ tristate "Xilinx Video HLS Core"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video HLS Cores
+
+config VIDEO_XILINX_REMAPPER
+ tristate "Xilinx Video Remapper"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Remapper
+
+config VIDEO_XILINX_RGB2YUV
+ tristate "Xilinx Video RGB to YUV Convertor"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Video RGB to YUV Converter
+
+config VIDEO_XILINX_SCALER
+ tristate "Xilinx Video Scaler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Scaler
+
+config VIDEO_XILINX_MULTISCALER
+ tristate "Xilinx Video Multiscaler"
+ depends on VIDEO_XILINX
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+	  Driver for the Xilinx Video Multi-Scaler. This is a V4L2 memory
+	  to memory based driver. The Multi-Scaler has a maximum of 8
+	  channels, each of which can be programmed with a different
+	  scaling ratio.
+
+config VIDEO_XILINX_SDIRXSS
+ tristate "Xilinx SDI Rx Subsystem"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx SDI Rx Subsystem
+
+config VIDEO_XILINX_SWITCH
+ tristate "Xilinx Video Switch"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Switch
+
config VIDEO_XILINX_TPG
tristate "Xilinx Video Test Pattern Generator"
depends on VIDEO_XILINX
select VIDEO_XILINX_VTC
help
- Driver for the Xilinx Video Test Pattern Generator
+ Driver for the Xilinx Video Test Pattern Generator
+
+config VIDEO_XILINX_VPSS_CSC
+ tristate "Xilinx VPSS CSC"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Video Processing Sub-System (VPSS)
+	  Color Space Conversion. The driver supports RGB to YUV444
+	  conversion and exposes video controls such as Brightness,
+	  Contrast and Color Gains that can be applied to the video.
+	  Say N if unsure. Say M to modularize.
+
+config VIDEO_XILINX_VPSS_SCALER
+ tristate "Xilinx Video VPSS Scaler"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Video Processing Sub-System (VPSS) Scaler.
+	  It allows upscaling and downscaling of video. It also supports
+	  limited Color Space Conversion.
+	  Say N if unsure.
config VIDEO_XILINX_VTC
tristate "Xilinx Video Timing Controller"
depends on VIDEO_XILINX
help
- Driver for the Xilinx Video Timing Controller
+ Driver for the Xilinx Video Timing Controller
+
+config VIDEO_XILINX_CSI2RXSS
+ tristate "Xilinx CSI2 Rx Subsystem"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx MIPI CSI2 Rx Subsystem
+
+config VIDEO_XILINX_SCD
+ tristate "Xilinx Scene Change Detect"
+ depends on VIDEO_XILINX
+ ---help---
+	  Driver for the Xilinx Scene Change Detection Controller.
+	  The driver allows applications to pass video buffers and
+	  reports whether a scene change is detected between
+	  adjacent frames.
+
+config VIDEO_XILINX_M2M
+ tristate "Xilinx Video mem2mem"
+ depends on VIDEO_XILINX
+ select V4L2_MEM2MEM_DEV
+ ---help---
+	  Driver for the Xilinx V4L2 mem2mem pipeline, which copies buffers
+	  between two physical memory regions using DMA transfers.
endif #VIDEO_XILINX
diff --git a/drivers/media/platform/xilinx/Makefile b/drivers/media/platform/xilinx/Makefile
index 4cdc0b1ec7a5..f7838c307405 100644
--- a/drivers/media/platform/xilinx/Makefile
+++ b/drivers/media/platform/xilinx/Makefile
@@ -1,7 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
xilinx-video-objs += xilinx-dma.o xilinx-vip.o xilinx-vipp.o
+xilinx-scd-objs += xilinx-scenechange.o xilinx-scenechange-channel.o \
+ xilinx-scenechange-dma.o
obj-$(CONFIG_VIDEO_XILINX) += xilinx-video.o
+obj-$(CONFIG_VIDEO_XILINX_AXI4S_SWITCH) += xilinx-axis-switch.o
+obj-$(CONFIG_VIDEO_XILINX_CFA) += xilinx-cfa.o
+obj-$(CONFIG_VIDEO_XILINX_CRESAMPLE) += xilinx-cresample.o
+obj-$(CONFIG_VIDEO_XILINX_CSI2RXSS) += xilinx-csi2rxss.o
+obj-$(CONFIG_VIDEO_XILINX_DEMOSAIC) += xilinx-demosaic.o
+obj-$(CONFIG_VIDEO_XILINX_GAMMA) += xilinx-gamma.o
+obj-$(CONFIG_VIDEO_XILINX_HLS) += xilinx-hls.o
+obj-$(CONFIG_VIDEO_XILINX_M2M) += xilinx-m2m.o
+obj-$(CONFIG_VIDEO_XILINX_MULTISCALER) += xilinx-multi-scaler.o
+obj-$(CONFIG_VIDEO_XILINX_REMAPPER) += xilinx-remapper.o
+obj-$(CONFIG_VIDEO_XILINX_RGB2YUV) += xilinx-rgb2yuv.o
+obj-$(CONFIG_VIDEO_XILINX_SCALER) += xilinx-scaler.o
+obj-$(CONFIG_VIDEO_XILINX_SCD) += xilinx-scd.o
+obj-$(CONFIG_VIDEO_XILINX_SDIRXSS) += xilinx-sdirxss.o
+obj-$(CONFIG_VIDEO_XILINX_SWITCH) += xilinx-switch.o
obj-$(CONFIG_VIDEO_XILINX_TPG) += xilinx-tpg.o
+obj-$(CONFIG_VIDEO_XILINX_VPSS_CSC) += xilinx-vpss-csc.o
+obj-$(CONFIG_VIDEO_XILINX_VPSS_SCALER) += xilinx-vpss-scaler.o
obj-$(CONFIG_VIDEO_XILINX_VTC) += xilinx-vtc.o
diff --git a/drivers/media/platform/xilinx/xilinx-axis-switch.c b/drivers/media/platform/xilinx/xilinx-axis-switch.c
new file mode 100644
index 000000000000..3963e364570a
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-axis-switch.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXI4-Stream Video Switch
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XVSW_CTRL_REG 0x00
+#define XVSW_CTRL_REG_UPDATE_MASK BIT(1)
+
+#define XVSW_MI_MUX_REG_BASE 0x40
+#define XVSW_MI_MUX_VAL_MASK 0xF
+#define XVSW_MI_MUX_DISABLE_MASK BIT(31)
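+/* One MI MUX register per master port, at a 4-byte stride from 0x40 */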
+
+#define MIN_VSW_SINKS 1
+#define MAX_VSW_SINKS 16
+#define MIN_VSW_SRCS 1
+#define MAX_VSW_SRCS 16
+
+/**
+ * struct xvswitch_device - Xilinx AXI4-Stream Switch device structure
+ * @dev: Platform structure
+ * @iomem: Base address of IP
+ * @subdev: The v4l2 subdev structure
+ * @pads: media pads
+ * @routing: sink pad connected to each source pad (-1 if none)
+ * @formats: active V4L2 media bus formats on sink pads
+ * @nsinks: number of sink pads (MIN_VSW_SINKS to MAX_VSW_SINKS)
+ * @nsources: number of source pads (MIN_VSW_SRCS to MAX_VSW_SRCS)
+ * @tdest_routing: Whether TDEST routing is enabled
+ * @aclk: Video clock
+ * @saxi_ctlclk: AXI-Lite control clock
+ */
+struct xvswitch_device {
+ struct device *dev;
+ void __iomem *iomem;
+ struct v4l2_subdev subdev;
+ struct media_pad *pads;
+ int routing[MAX_VSW_SRCS];
+ struct v4l2_mbus_framefmt *formats;
+ u32 nsinks;
+ u32 nsources;
+ bool tdest_routing;
+ struct clk *aclk;
+ struct clk *saxi_ctlclk;
+};
+
+static inline struct xvswitch_device *to_xvsw(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xvswitch_device, subdev);
+}
+
+static inline u32 xvswitch_read(struct xvswitch_device *xvsw, u32 addr)
+{
+ return ioread32(xvsw->iomem + addr);
+}
+
+static inline void xvswitch_write(struct xvswitch_device *xvsw, u32 addr,
+ u32 value)
+{
+ iowrite32(value, xvsw->iomem + addr);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xvsw_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+
+ /* Nothing to be done in case of TDEST routing */
+ if (xvsw->tdest_routing)
+ return 0;
+
+ if (!enable) {
+ /* In control reg routing, disable all master ports */
+ for (i = 0; i < xvsw->nsources; i++) {
+ xvswitch_write(xvsw, XVSW_MI_MUX_REG_BASE + (i * 4),
+ XVSW_MI_MUX_DISABLE_MASK);
+ }
+ xvswitch_write(xvsw, XVSW_CTRL_REG, XVSW_CTRL_REG_UPDATE_MASK);
+ return 0;
+ }
+
+	/*
+	 * In control register routing mode, write the values from the
+	 * routing table into the respective MI MUX registers and enable
+	 * the routed master ports.
+	 */
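+	/*
+	 * For example (hypothetical routing): routing[0] = 1 programs the
+	 * MI MUX register at offset 0x40 so that master port M0 receives
+	 * slave port S1; an entry of -1 writes the disable bit instead.
+	 */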
+ for (i = 0; i < MAX_VSW_SRCS; i++) {
+ u32 val;
+
+ if (xvsw->routing[i] != -1)
+ val = xvsw->routing[i];
+ else
+ val = XVSW_MI_MUX_DISABLE_MASK;
+
+ xvswitch_write(xvsw, XVSW_MI_MUX_REG_BASE + (i * 4),
+ val);
+ }
+
+ xvswitch_write(xvsw, XVSW_CTRL_REG, XVSW_CTRL_REG_UPDATE_MASK);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+xvsw_get_pad_format(struct xvswitch_device *xvsw,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xvsw->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xvsw->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xvsw_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ int pad = fmt->pad;
+
+	/*
+	 * In control register routing mode, if the pad is a source pad,
+	 * look up the corresponding sink pad. If no sink pad is routed
+	 * to it, clear the format and return.
+	 */
+
+ if (!xvsw->tdest_routing && pad >= xvsw->nsinks) {
+ pad = xvsw->routing[pad - xvsw->nsinks];
+ if (pad < 0) {
+ memset(&fmt->format, 0, sizeof(fmt->format));
+ return 0;
+ }
+ }
+
+ fmt->format = *xvsw_get_pad_format(xvsw, cfg, pad, fmt->which);
+
+ return 0;
+}
+
+static int xvsw_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (!xvsw->tdest_routing && fmt->pad >= xvsw->nsinks) {
+ /*
+ * In case of control reg routing,
+ * get the corresponding sink pad to source pad passed.
+ *
+ * The source pad format is always identical to the
+ * sink pad format and can't be modified.
+ *
+ * If sink pad found then get_format for that pad
+ * else clear the fmt->format as the source pad
+ * isn't connected and return.
+ */
+ return xvsw_get_format(subdev, cfg, fmt);
+ }
+
+	/*
+	 * In TDEST routing mode, any format may be set on a pad, as it
+	 * cannot be determined which pad's data will travel to which
+	 * pad. E.g. in a system with 2 slaves and 4 masters, S0 or S1
+	 * data can reach M0 through M3 based on TDEST: S0 may carry RGB
+	 * and S1 YUV, with M0 and M1 streaming RGB and M2 and M3
+	 * streaming YUV.
+	 *
+	 * In control register routing mode, set the format only on sink
+	 * pads.
+	 */
+ format = xvsw_get_pad_format(xvsw, cfg, fmt->pad, fmt->which);
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xvsw_get_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+ u32 min;
+
+	/* Routing cannot be queried in TDEST routing mode */
+ if (xvsw->tdest_routing)
+ return -EINVAL;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (xvsw->nsources < route->num_routes)
+ min = xvsw->nsources;
+ else
+ min = route->num_routes;
+
+ for (i = 0; i < min; ++i) {
+ route->routes[i].sink = xvsw->routing[i];
+ route->routes[i].source = i;
+ }
+
+ route->num_routes = xvsw->nsources;
+
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ return 0;
+}
+
+static int xvsw_set_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+ int ret = 0;
+
+	/* Routing cannot be set in TDEST routing mode */
+ if (xvsw->tdest_routing)
+ return -EINVAL;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (subdev->entity.stream_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ for (i = 0; i < xvsw->nsources; ++i)
+ xvsw->routing[i] = -1;
+
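+	/*
+	 * Source pads are numbered after the sink pads in the media
+	 * entity, so source pad N maps to routing table entry N - nsinks.
+	 */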
+ for (i = 0; i < route->num_routes; ++i)
+ xvsw->routing[route->routes[i].source - xvsw->nsinks] =
+ route->routes[i].sink;
+
+done:
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+ return ret;
+}
+
+static int xvsw_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xvsw_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xvsw_video_ops = {
+ .s_stream = xvsw_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xvsw_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xvsw_get_format,
+ .set_fmt = xvsw_set_format,
+ .get_routing = xvsw_get_routing,
+ .set_routing = xvsw_set_routing,
+};
+
+static struct v4l2_subdev_ops xvsw_ops = {
+ .video = &xvsw_video_ops,
+ .pad = &xvsw_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xvsw_internal_ops = {
+ .open = xvsw_open,
+ .close = xvsw_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static bool xvsw_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ struct xvswitch_device *xvsw =
+ container_of(entity, struct xvswitch_device, subdev.entity);
+ unsigned int sink0, sink1;
+
+ /* Two sinks are never connected together. */
+ if (pad0 < xvsw->nsinks && pad1 < xvsw->nsinks)
+ return false;
+
+ /* In TDEST routing, assume all sinks and sources are connected */
+ if (xvsw->tdest_routing)
+ return true;
+
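+	/*
+	 * Map each pad to the sink feeding it: a sink pad maps to itself,
+	 * a source pad to the sink recorded in its routing table entry.
+	 * Two pads are connected when they resolve to the same sink.
+	 */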
+ sink0 = pad0 < xvsw->nsinks ? pad0 : xvsw->routing[pad0 - xvsw->nsinks];
+ sink1 = pad1 < xvsw->nsinks ? pad1 : xvsw->routing[pad1 - xvsw->nsinks];
+
+ return sink0 == sink1;
+}
+
+static const struct media_entity_operations xvsw_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = xvsw_has_route,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xvsw_parse_of(struct xvswitch_device *xvsw)
+{
+ struct device_node *node = xvsw->dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int nports = 0;
+ u32 routing_mode;
+ int ret;
+
+ ret = of_property_read_u32(node, "xlnx,num-si-slots", &xvsw->nsinks);
+ if (ret < 0 || xvsw->nsinks < MIN_VSW_SINKS ||
+ xvsw->nsinks > MAX_VSW_SINKS) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,num-si-slots property\n");
+		return ret ? ret : -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-mi-slots", &xvsw->nsources);
+ if (ret < 0 || xvsw->nsources < MIN_VSW_SRCS ||
+ xvsw->nsources > MAX_VSW_SRCS) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,num-mi-slots property\n");
+		return ret ? ret : -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,routing-mode", &routing_mode);
+	if (ret < 0 || routing_mode > 1) {
+		dev_err(xvsw->dev, "missing or invalid xlnx,routing-mode property\n");
+		return ret ? ret : -EINVAL;
+ }
+
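+	/* routing-mode 0 selects TDEST routing; 1 selects control register routing */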
+ if (!routing_mode)
+ xvsw->tdest_routing = true;
+
+ xvsw->aclk = devm_clk_get(xvsw->dev, "aclk");
+ if (IS_ERR(xvsw->aclk)) {
+ ret = PTR_ERR(xvsw->aclk);
+ dev_err(xvsw->dev, "failed to get ap_clk (%d)\n", ret);
+ return ret;
+ }
+
+ if (!xvsw->tdest_routing) {
+ xvsw->saxi_ctlclk = devm_clk_get(xvsw->dev,
+ "s_axi_ctl_clk");
+ if (IS_ERR(xvsw->saxi_ctlclk)) {
+ ret = PTR_ERR(xvsw->saxi_ctlclk);
+ dev_err(xvsw->dev,
+ "failed to get s_axi_ctl_clk (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (xvsw->tdest_routing && xvsw->nsinks > 1) {
+ dev_err(xvsw->dev, "sinks = %d. Driver Limitation max 1 sink in TDEST routing mode\n",
+ xvsw->nsinks);
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(xvsw->dev, "No port at\n");
+ return -EINVAL;
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ /* validate number of ports */
+ if (nports != (xvsw->nsinks + xvsw->nsources)) {
+ dev_err(xvsw->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xvsw_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xvswitch_device *xvsw;
+ struct resource *res;
+ unsigned int npads;
+ unsigned int i, padcount;
+ int ret;
+
+ xvsw = devm_kzalloc(&pdev->dev, sizeof(*xvsw), GFP_KERNEL);
+ if (!xvsw)
+ return -ENOMEM;
+
+ xvsw->dev = &pdev->dev;
+
+ ret = xvsw_parse_of(xvsw);
+ if (ret < 0)
+ return ret;
+
+ /* ioremap only if control reg based routing */
+ if (!xvsw->tdest_routing) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvsw->iomem = devm_ioremap_resource(xvsw->dev, res);
+ if (IS_ERR(xvsw->iomem))
+ return PTR_ERR(xvsw->iomem);
+ }
+
+	/*
+	 * Initialize the V4L2 subdevice and media entity. The total pad
+	 * count is the number of sink pads plus the number of source
+	 * pads.
+	 */
+ npads = xvsw->nsinks + xvsw->nsources;
+ xvsw->pads = devm_kzalloc(&pdev->dev, npads * sizeof(*xvsw->pads),
+ GFP_KERNEL);
+ if (!xvsw->pads)
+ return -ENOMEM;
+
+ for (i = 0; i < xvsw->nsinks; ++i)
+ xvsw->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ for (; i < npads; ++i)
+ xvsw->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ padcount = xvsw->tdest_routing ? npads : xvsw->nsinks;
+
+	/*
+	 * In TDEST routing mode, allocate one format per pad, as a
+	 * source pad's format has to match one of the sink pads.
+	 *
+	 * Otherwise allocate formats only for the sink pads: each source
+	 * pad shares the format of its routed sink pad, so getting or
+	 * setting the format on a source pad operates on the
+	 * corresponding sink's data.
+	 */
+ xvsw->formats = devm_kzalloc(&pdev->dev,
+ padcount * sizeof(*xvsw->formats),
+ GFP_KERNEL);
+ if (!xvsw->formats) {
+ dev_err(xvsw->dev, "No memory to allocate formats!\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < padcount; i++) {
+ xvsw->formats[i].code = MEDIA_BUS_FMT_RGB888_1X24;
+ xvsw->formats[i].field = V4L2_FIELD_NONE;
+ xvsw->formats[i].colorspace = V4L2_COLORSPACE_SRGB;
+ xvsw->formats[i].width = XVIP_MAX_WIDTH;
+ xvsw->formats[i].height = XVIP_MAX_HEIGHT;
+ }
+
+	/*
+	 * Initialize the routing table with no connections. The routing
+	 * table is only used when routing is not TDEST based.
+	 */
+ for (i = 0; i < MAX_VSW_SRCS; ++i)
+ xvsw->routing[i] = -1;
+
+ ret = clk_prepare_enable(xvsw->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (!xvsw->tdest_routing) {
+ ret = clk_prepare_enable(xvsw->saxi_ctlclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_ctl_clk (%d)\n",
+ ret);
+ clk_disable_unprepare(xvsw->aclk);
+ return ret;
+ }
+ }
+
+ subdev = &xvsw->subdev;
+ v4l2_subdev_init(subdev, &xvsw_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xvsw_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xvsw);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xvsw_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, npads, xvsw->pads);
+ if (ret < 0)
+ goto clk_error;
+
+ platform_set_drvdata(pdev, xvsw);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(xvsw->dev, "Xilinx AXI4-Stream Switch found!\n");
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+clk_error:
+ if (!xvsw->tdest_routing)
+ clk_disable_unprepare(xvsw->saxi_ctlclk);
+ clk_disable_unprepare(xvsw->aclk);
+ return ret;
+}
+
+static int xvsw_remove(struct platform_device *pdev)
+{
+ struct xvswitch_device *xvsw = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xvsw->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ if (!xvsw->tdest_routing)
+ clk_disable_unprepare(xvsw->saxi_ctlclk);
+ clk_disable_unprepare(xvsw->aclk);
+ return 0;
+}
+
+static const struct of_device_id xvsw_of_id_table[] = {
+ { .compatible = "xlnx,axis-switch-1.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvsw_of_id_table);
+
+static struct platform_driver xvsw_driver = {
+ .driver = {
+ .name = "xilinx-axis-switch",
+ .of_match_table = xvsw_of_id_table,
+ },
+ .probe = xvsw_probe,
+ .remove = xvsw_remove,
+};
+
+module_platform_driver(xvsw_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vishal.sagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx AXI4-Stream Switch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-cfa.c b/drivers/media/platform/xilinx/xilinx-cfa.c
new file mode 100644
index 000000000000..832fb7306563
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-cfa.c
@@ -0,0 +1,394 @@
+/*
+ * Xilinx Color Filter Array
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XCFA_BAYER_PHASE 0x100
+#define XCFA_BAYER_PHASE_RGGB 0
+#define XCFA_BAYER_PHASE_GRBG 1
+#define XCFA_BAYER_PHASE_GBRG 2
+#define XCFA_BAYER_PHASE_BGGR 3
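+/*
+ * The phase value names the first two pixels of the first two lines,
+ * e.g. RGGB means line 0 starts R, G and line 1 starts G, B.
+ */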
+
+/**
+ * struct xcfa_device - Xilinx CFA device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ */
+struct xcfa_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+};
+
+static inline struct xcfa_device *to_cfa(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcfa_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xcfa_get_bayer_phase(const unsigned int code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return XCFA_BAYER_PHASE_RGGB;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ return XCFA_BAYER_PHASE_GRBG;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ return XCFA_BAYER_PHASE_GBRG;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return XCFA_BAYER_PHASE_BGGR;
+ }
+
+ return -EINVAL;
+}
+
+static int xcfa_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ const unsigned int code = xcfa->formats[XVIP_PAD_SINK].code;
+ u32 bayer_phase;
+
+ if (!enable) {
+ xvip_stop(&xcfa->xvip);
+ return 0;
+ }
+
+	/*
+	 * The sink format code is validated when the format is set, so
+	 * this always returns a valid Bayer phase here.
+	 */
+ bayer_phase = xcfa_get_bayer_phase(code);
+
+ xvip_write(&xcfa->xvip, XCFA_BAYER_PHASE, bayer_phase);
+
+ xvip_set_frame_size(&xcfa->xvip, &xcfa->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xcfa->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xcfa_get_pad_format(struct xcfa_device *xcfa,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcfa->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcfa->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xcfa_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+
+ fmt->format = *__xcfa_get_pad_format(xcfa, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xcfa_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+ int bayer_phase;
+
+ format = __xcfa_get_pad_format(xcfa, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ bayer_phase = xcfa_get_bayer_phase(fmt->format.code);
+ if (bayer_phase >= 0) {
+ xcfa->vip_formats[XVIP_PAD_SINK] =
+ xvip_get_format_by_code(fmt->format.code);
+ format->code = fmt->format.code;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad */
+ format = __xcfa_get_pad_format(xcfa, cfg, XVIP_PAD_SOURCE, fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xcfa_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcfa->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcfa->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcfa_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xcfa_video_ops = {
+ .s_stream = xcfa_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xcfa_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcfa_get_format,
+ .set_fmt = xcfa_set_format,
+};
+
+static struct v4l2_subdev_ops xcfa_ops = {
+ .video = &xcfa_video_ops,
+ .pad = &xcfa_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xcfa_internal_ops = {
+ .open = xcfa_open,
+ .close = xcfa_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcfa_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xcfa_pm_suspend(struct device *dev)
+{
+ struct xcfa_device *xcfa = dev_get_drvdata(dev);
+
+ xvip_suspend(&xcfa->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xcfa_pm_resume(struct device *dev)
+{
+ struct xcfa_device *xcfa = dev_get_drvdata(dev);
+
+ xvip_resume(&xcfa->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xcfa_parse_of(struct xcfa_device *xcfa)
+{
+ struct device *dev = xcfa->xvip.dev;
+ struct device_node *node = xcfa->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xcfa->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xcfa_probe(struct platform_device *pdev)
+{
+ struct xcfa_device *xcfa;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+
+ xcfa = devm_kzalloc(&pdev->dev, sizeof(*xcfa), GFP_KERNEL);
+ if (!xcfa)
+ return -ENOMEM;
+
+ xcfa->xvip.dev = &pdev->dev;
+
+ ret = xcfa_parse_of(xcfa);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xcfa->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xcfa->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcfa->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcfa_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcfa_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcfa);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xcfa->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcfa->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xcfa->xvip, default_format);
+
+ xcfa->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xcfa->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xcfa->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcfa->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xcfa->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xcfa->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcfa->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xcfa_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xcfa->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xcfa);
+
+ xvip_print_version(&xcfa->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcfa->xvip);
+ return ret;
+}
+
+static int xcfa_remove(struct platform_device *pdev)
+{
+ struct xcfa_device *xcfa = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcfa->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xcfa->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcfa_pm_ops, xcfa_pm_suspend, xcfa_pm_resume);
+
+static const struct of_device_id xcfa_of_id_table[] = {
+ { .compatible = "xlnx,v-cfa-7.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcfa_of_id_table);
+
+static struct platform_driver xcfa_driver = {
+ .driver = {
+ .name = "xilinx-cfa",
+ .pm = &xcfa_pm_ops,
+ .of_match_table = xcfa_of_id_table,
+ },
+ .probe = xcfa_probe,
+ .remove = xcfa_remove,
+};
+
+module_platform_driver(xcfa_driver);
+
+MODULE_DESCRIPTION("Xilinx Color Filter Array Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-cresample.c b/drivers/media/platform/xilinx/xilinx-cresample.c
new file mode 100644
index 000000000000..05335c10a388
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-cresample.c
@@ -0,0 +1,447 @@
+/*
+ * Xilinx Chroma Resampler
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XCRESAMPLE_ENCODING 0x100
+#define XCRESAMPLE_ENCODING_FIELD (1 << 7)
+#define XCRESAMPLE_ENCODING_CHROMA (1 << 8)
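+
+/*
+ * Bit 7 of the encoding register selects the field parity and bit 8 the
+ * chroma parity; following the menu order in xcresample_parity_string,
+ * 0 means even and 1 means odd.
+ */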
+
+/**
+ * struct xcresample_device - Xilinx CRESAMPLE device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ * @ctrl_handler: control handler
+ */
+struct xcresample_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+static inline struct xcresample_device *to_cresample(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcresample_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xcresample_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+
+ if (!enable) {
+ xvip_stop(&xcresample->xvip);
+ return 0;
+ }
+
+ xvip_set_frame_size(&xcresample->xvip,
+ &xcresample->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xcresample->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xcresample_get_pad_format(struct xcresample_device *xcresample,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcresample->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcresample->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xcresample_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+
+ fmt->format = *__xcresample_get_pad_format(xcresample, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xcresample_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xcresample_get_pad_format(xcresample, cfg, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xcresample_get_pad_format(xcresample, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xcresample_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcresample->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcresample->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcresample_close(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xcresample_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xcresample_device *xcresample =
+ container_of(ctrl->handler, struct xcresample_device,
+ ctrl_handler);
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY:
+ xvip_clr_or_set(&xcresample->xvip, XCRESAMPLE_ENCODING,
+ XCRESAMPLE_ENCODING_FIELD, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY:
+ xvip_clr_or_set(&xcresample->xvip, XCRESAMPLE_ENCODING,
+ XCRESAMPLE_ENCODING_CHROMA, ctrl->val);
+ return 0;
+ }
+
+	return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops xcresample_ctrl_ops = {
+ .s_ctrl = xcresample_s_ctrl,
+};
+
+static struct v4l2_subdev_video_ops xcresample_video_ops = {
+ .s_stream = xcresample_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xcresample_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcresample_get_format,
+ .set_fmt = xcresample_set_format,
+};
+
+static struct v4l2_subdev_ops xcresample_ops = {
+ .video = &xcresample_video_ops,
+ .pad = &xcresample_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xcresample_internal_ops = {
+ .open = xcresample_open,
+ .close = xcresample_close,
+};
+
+/*
+ * Control Configs
+ */
+
+static const char *const xcresample_parity_string[] = {
+ "Even",
+ "Odd",
+};
+
+static struct v4l2_ctrl_config xcresample_field = {
+ .ops = &xcresample_ctrl_ops,
+ .id = V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY,
+ .name = "Chroma Resampler: Encoding Field Parity",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .qmenu = xcresample_parity_string,
+};
+
+static struct v4l2_ctrl_config xcresample_chroma = {
+ .ops = &xcresample_ctrl_ops,
+ .id = V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY,
+ .name = "Chroma Resampler: Encoding Chroma Parity",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .qmenu = xcresample_parity_string,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcresample_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xcresample_pm_suspend(struct device *dev)
+{
+ struct xcresample_device *xcresample = dev_get_drvdata(dev);
+
+ xvip_suspend(&xcresample->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xcresample_pm_resume(struct device *dev)
+{
+ struct xcresample_device *xcresample = dev_get_drvdata(dev);
+
+ xvip_resume(&xcresample->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xcresample_parse_of(struct xcresample_device *xcresample)
+{
+ struct device *dev = xcresample->xvip.dev;
+ struct device_node *node = xcresample->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xcresample->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xcresample_probe(struct platform_device *pdev)
+{
+ struct xcresample_device *xcresample;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+
+ xcresample = devm_kzalloc(&pdev->dev, sizeof(*xcresample), GFP_KERNEL);
+ if (!xcresample)
+ return -ENOMEM;
+
+ xcresample->xvip.dev = &pdev->dev;
+
+ ret = xcresample_parse_of(xcresample);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xcresample->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xcresample->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcresample->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcresample_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcresample_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcresample);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xcresample->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcresample->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xcresample->xvip, default_format);
+
+ xcresample->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xcresample->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xcresample->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcresample->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xcresample->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xcresample->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcresample->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xcresample_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xcresample->pads);
+ if (ret < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&xcresample->ctrl_handler, 2);
+ xcresample_field.def =
+ (xvip_read(&xcresample->xvip, XCRESAMPLE_ENCODING) &
+ XCRESAMPLE_ENCODING_FIELD) ? 1 : 0;
+ v4l2_ctrl_new_custom(&xcresample->ctrl_handler, &xcresample_field,
+ NULL);
+ xcresample_chroma.def =
+ (xvip_read(&xcresample->xvip, XCRESAMPLE_ENCODING) &
+ XCRESAMPLE_ENCODING_CHROMA) ? 1 : 0;
+ v4l2_ctrl_new_custom(&xcresample->ctrl_handler, &xcresample_chroma,
+ NULL);
+ if (xcresample->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xcresample->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xcresample->ctrl_handler;
+
+ platform_set_drvdata(pdev, xcresample);
+
+ xvip_print_version(&xcresample->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xcresample->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcresample->xvip);
+ return ret;
+}
+
+static int xcresample_remove(struct platform_device *pdev)
+{
+ struct xcresample_device *xcresample = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcresample->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcresample->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xcresample->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcresample_pm_ops, xcresample_pm_suspend,
+ xcresample_pm_resume);
+
+static const struct of_device_id xcresample_of_id_table[] = {
+ { .compatible = "xlnx,v-cresample-4.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcresample_of_id_table);
+
+static struct platform_driver xcresample_driver = {
+ .driver = {
+ .name = "xilinx-cresample",
+ .pm = &xcresample_pm_ops,
+ .of_match_table = xcresample_of_id_table,
+ },
+ .probe = xcresample_probe,
+ .remove = xcresample_remove,
+};
+
+module_platform_driver(xcresample_driver);
+
+MODULE_DESCRIPTION("Xilinx Chroma Resampler Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
new file mode 100644
index 000000000000..fc58a5268a9e
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -0,0 +1,2023 @@
+/*
+ * Xilinx MIPI CSI2 Subsystem
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * Contacts: Vishal Sagar <vsagar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/media/xilinx-vip.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/v4l2-subdev.h>
+#include <linux/xilinx-csi2rxss.h>
+#include <linux/xilinx-v4l2-controls.h>
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/*
+ * MIPI CSI2 Rx register map, bitmask and offsets
+ */
+#define XCSI_CCR_OFFSET 0x00000000
+#define XCSI_CCR_SOFTRESET_SHIFT 1
+#define XCSI_CCR_COREENB_SHIFT 0
+#define XCSI_CCR_SOFTRESET_MASK BIT(XCSI_CCR_SOFTRESET_SHIFT)
+#define XCSI_CCR_COREENB_MASK BIT(XCSI_CCR_COREENB_SHIFT)
+
+#define XCSI_PCR_OFFSET 0x00000004
+#define XCSI_PCR_MAXLANES_MASK 0x00000018
+#define XCSI_PCR_ACTLANES_MASK 0x00000003
+#define XCSI_PCR_MAXLANES_SHIFT 3
+#define XCSI_PCR_ACTLANES_SHIFT 0
+
+#define XCSI_CSR_OFFSET 0x00000010
+#define XCSI_CSR_PKTCOUNT_SHIFT 16
+#define XCSI_CSR_SPFIFOFULL_SHIFT 3
+#define XCSI_CSR_SPFIFONE_SHIFT 2
+#define XCSI_CSR_SLBF_SHIFT 1
+#define XCSI_CSR_RIPCD_SHIFT 0
+#define XCSI_CSR_PKTCOUNT_MASK 0xFFFF0000
+#define XCSI_CSR_SPFIFOFULL_MASK BIT(XCSI_CSR_SPFIFOFULL_SHIFT)
+#define XCSI_CSR_SPFIFONE_MASK BIT(XCSI_CSR_SPFIFONE_SHIFT)
+#define XCSI_CSR_SLBF_MASK BIT(XCSI_CSR_SLBF_SHIFT)
+#define XCSI_CSR_RIPCD_MASK BIT(XCSI_CSR_RIPCD_SHIFT)
+
+#define XCSI_GIER_OFFSET 0x00000020
+#define XCSI_GIER_GIE_SHIFT 0
+#define XCSI_GIER_GIE_MASK BIT(XCSI_GIER_GIE_SHIFT)
+#define XCSI_GIER_SET 1
+#define XCSI_GIER_RESET 0
+
+#define XCSI_ISR_OFFSET 0x00000024
+#define XCSI_ISR_FR_SHIFT 31
+#define XCSI_ISR_VCX_SHIFT 30
+#define XCSI_ISR_ILC_SHIFT 21
+#define XCSI_ISR_SPFIFOF_SHIFT 20
+#define XCSI_ISR_SPFIFONE_SHIFT 19
+#define XCSI_ISR_SLBF_SHIFT 18
+#define XCSI_ISR_STOP_SHIFT 17
+#define XCSI_ISR_SOTERR_SHIFT 13
+#define XCSI_ISR_SOTSYNCERR_SHIFT 12
+#define XCSI_ISR_ECC2BERR_SHIFT 11
+#define XCSI_ISR_ECC1BERR_SHIFT 10
+#define XCSI_ISR_CRCERR_SHIFT 9
+#define XCSI_ISR_DATAIDERR_SHIFT 8
+#define XCSI_ISR_VC3FSYNCERR_SHIFT 7
+#define XCSI_ISR_VC3FLVLERR_SHIFT 6
+#define XCSI_ISR_VC2FSYNCERR_SHIFT 5
+#define XCSI_ISR_VC2FLVLERR_SHIFT 4
+#define XCSI_ISR_VC1FSYNCERR_SHIFT 3
+#define XCSI_ISR_VC1FLVLERR_SHIFT 2
+#define XCSI_ISR_VC0FSYNCERR_SHIFT 1
+#define XCSI_ISR_VC0FLVLERR_SHIFT 0
+#define XCSI_ISR_FR_MASK BIT(XCSI_ISR_FR_SHIFT)
+#define XCSI_ISR_VCX_MASK BIT(XCSI_ISR_VCX_SHIFT)
+#define XCSI_ISR_ILC_MASK BIT(XCSI_ISR_ILC_SHIFT)
+#define XCSI_ISR_SPFIFOF_MASK BIT(XCSI_ISR_SPFIFOF_SHIFT)
+#define XCSI_ISR_SPFIFONE_MASK BIT(XCSI_ISR_SPFIFONE_SHIFT)
+#define XCSI_ISR_SLBF_MASK BIT(XCSI_ISR_SLBF_SHIFT)
+#define XCSI_ISR_STOP_MASK BIT(XCSI_ISR_STOP_SHIFT)
+#define XCSI_ISR_SOTERR_MASK BIT(XCSI_ISR_SOTERR_SHIFT)
+#define XCSI_ISR_SOTSYNCERR_MASK BIT(XCSI_ISR_SOTSYNCERR_SHIFT)
+#define XCSI_ISR_ECC2BERR_MASK BIT(XCSI_ISR_ECC2BERR_SHIFT)
+#define XCSI_ISR_ECC1BERR_MASK BIT(XCSI_ISR_ECC1BERR_SHIFT)
+#define XCSI_ISR_CRCERR_MASK BIT(XCSI_ISR_CRCERR_SHIFT)
+#define XCSI_ISR_DATAIDERR_MASK BIT(XCSI_ISR_DATAIDERR_SHIFT)
+#define XCSI_ISR_VC3FSYNCERR_MASK BIT(XCSI_ISR_VC3FSYNCERR_SHIFT)
+#define XCSI_ISR_VC3FLVLERR_MASK BIT(XCSI_ISR_VC3FLVLERR_SHIFT)
+#define XCSI_ISR_VC2FSYNCERR_MASK BIT(XCSI_ISR_VC2FSYNCERR_SHIFT)
+#define XCSI_ISR_VC2FLVLERR_MASK BIT(XCSI_ISR_VC2FLVLERR_SHIFT)
+#define XCSI_ISR_VC1FSYNCERR_MASK BIT(XCSI_ISR_VC1FSYNCERR_SHIFT)
+#define XCSI_ISR_VC1FLVLERR_MASK BIT(XCSI_ISR_VC1FLVLERR_SHIFT)
+#define XCSI_ISR_VC0FSYNCERR_MASK BIT(XCSI_ISR_VC0FSYNCERR_SHIFT)
+#define XCSI_ISR_VC0FLVLERR_MASK BIT(XCSI_ISR_VC0FLVLERR_SHIFT)
+#define XCSI_ISR_ALLINTR_MASK 0xC03FFFFF
+
+#define XCSI_INTR_PROT_MASK (XCSI_ISR_VC3FSYNCERR_MASK | \
+ XCSI_ISR_VC3FLVLERR_MASK | \
+ XCSI_ISR_VC2FSYNCERR_MASK | \
+ XCSI_ISR_VC2FLVLERR_MASK | \
+ XCSI_ISR_VC1FSYNCERR_MASK | \
+ XCSI_ISR_VC1FLVLERR_MASK | \
+ XCSI_ISR_VC0FSYNCERR_MASK | \
+ XCSI_ISR_VC0FLVLERR_MASK | \
+ XCSI_ISR_VCX_MASK)
+
+#define XCSI_INTR_PKTLVL_MASK (XCSI_ISR_ECC2BERR_MASK | \
+ XCSI_ISR_ECC1BERR_MASK | \
+ XCSI_ISR_CRCERR_MASK | \
+ XCSI_ISR_DATAIDERR_MASK)
+
+#define XCSI_INTR_DPHY_MASK (XCSI_ISR_SOTERR_MASK | \
+ XCSI_ISR_SOTSYNCERR_MASK)
+
+#define XCSI_INTR_SPKT_MASK (XCSI_ISR_SPFIFOF_MASK | \
+ XCSI_ISR_SPFIFONE_MASK)
+
+#define XCSI_INTR_FRAMERCVD_MASK (XCSI_ISR_FR_MASK)
+
+#define XCSI_INTR_ERR_MASK (XCSI_ISR_ILC_MASK | \
+ XCSI_ISR_SLBF_MASK | \
+ XCSI_ISR_STOP_MASK)
+
+#define XCSI_IER_OFFSET 0x00000028
+#define XCSI_IER_FR_SHIFT 31
+#define XCSI_IER_VCX_SHIFT 30
+#define XCSI_IER_ILC_SHIFT 21
+#define XCSI_IER_SPFIFOF_SHIFT 20
+#define XCSI_IER_SPFIFONE_SHIFT 19
+#define XCSI_IER_SLBF_SHIFT 18
+#define XCSI_IER_STOP_SHIFT 17
+#define XCSI_IER_SOTERR_SHIFT 13
+#define XCSI_IER_SOTSYNCERR_SHIFT 12
+#define XCSI_IER_ECC2BERR_SHIFT 11
+#define XCSI_IER_ECC1BERR_SHIFT 10
+#define XCSI_IER_CRCERR_SHIFT 9
+#define XCSI_IER_DATAIDERR_SHIFT 8
+#define XCSI_IER_VC3FSYNCERR_SHIFT 7
+#define XCSI_IER_VC3FLVLERR_SHIFT 6
+#define XCSI_IER_VC2FSYNCERR_SHIFT 5
+#define XCSI_IER_VC2FLVLERR_SHIFT 4
+#define XCSI_IER_VC1FSYNCERR_SHIFT 3
+#define XCSI_IER_VC1FLVLERR_SHIFT 2
+#define XCSI_IER_VC0FSYNCERR_SHIFT 1
+#define XCSI_IER_VC0FLVLERR_SHIFT 0
+#define XCSI_IER_FR_MASK BIT(XCSI_IER_FR_SHIFT)
+#define XCSI_IER_VCX_MASK BIT(XCSI_IER_VCX_SHIFT)
+#define XCSI_IER_ILC_MASK BIT(XCSI_IER_ILC_SHIFT)
+#define XCSI_IER_SPFIFOF_MASK BIT(XCSI_IER_SPFIFOF_SHIFT)
+#define XCSI_IER_SPFIFONE_MASK BIT(XCSI_IER_SPFIFONE_SHIFT)
+#define XCSI_IER_SLBF_MASK BIT(XCSI_IER_SLBF_SHIFT)
+#define XCSI_IER_STOP_MASK BIT(XCSI_IER_STOP_SHIFT)
+#define XCSI_IER_SOTERR_MASK BIT(XCSI_IER_SOTERR_SHIFT)
+#define XCSI_IER_SOTSYNCERR_MASK BIT(XCSI_IER_SOTSYNCERR_SHIFT)
+#define XCSI_IER_ECC2BERR_MASK BIT(XCSI_IER_ECC2BERR_SHIFT)
+#define XCSI_IER_ECC1BERR_MASK BIT(XCSI_IER_ECC1BERR_SHIFT)
+#define XCSI_IER_CRCERR_MASK BIT(XCSI_IER_CRCERR_SHIFT)
+#define XCSI_IER_DATAIDERR_MASK BIT(XCSI_IER_DATAIDERR_SHIFT)
+#define XCSI_IER_VC3FSYNCERR_MASK BIT(XCSI_IER_VC3FSYNCERR_SHIFT)
+#define XCSI_IER_VC3FLVLERR_MASK BIT(XCSI_IER_VC3FLVLERR_SHIFT)
+#define XCSI_IER_VC2FSYNCERR_MASK BIT(XCSI_IER_VC2FSYNCERR_SHIFT)
+#define XCSI_IER_VC2FLVLERR_MASK BIT(XCSI_IER_VC2FLVLERR_SHIFT)
+#define XCSI_IER_VC1FSYNCERR_MASK BIT(XCSI_IER_VC1FSYNCERR_SHIFT)
+#define XCSI_IER_VC1FLVLERR_MASK BIT(XCSI_IER_VC1FLVLERR_SHIFT)
+#define XCSI_IER_VC0FSYNCERR_MASK BIT(XCSI_IER_VC0FSYNCERR_SHIFT)
+#define XCSI_IER_VC0FLVLERR_MASK BIT(XCSI_IER_VC0FLVLERR_SHIFT)
+#define XCSI_IER_ALLINTR_MASK 0xC03FFFFF
+
+#define XCSI_SPKTR_OFFSET 0x00000030
+#define XCSI_SPKTR_DATA_SHIFT 8
+#define XCSI_SPKTR_VC_SHIFT 6
+#define XCSI_SPKTR_DT_SHIFT 0
+#define XCSI_SPKTR_DATA_MASK 0x00FFFF00
+#define XCSI_SPKTR_VC_MASK 0x000000C0
+#define XCSI_SPKTR_DT_MASK 0x0000003F
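+/* Short packet layout: [23:8] data, [7:6] virtual channel, [5:0] data type */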
+
+#define XCSI_VCXR_OFFSET 0x00000034
+#define XCSI_VCXR_VC15FSYNCERR_MASK BIT(23)
+#define XCSI_VCXR_VC15FLVLERR_MASK BIT(22)
+#define XCSI_VCXR_VC14FSYNCERR_MASK BIT(21)
+#define XCSI_VCXR_VC14FLVLERR_MASK BIT(20)
+#define XCSI_VCXR_VC13FSYNCERR_MASK BIT(19)
+#define XCSI_VCXR_VC13FLVLERR_MASK BIT(18)
+#define XCSI_VCXR_VC12FSYNCERR_MASK BIT(17)
+#define XCSI_VCXR_VC12FLVLERR_MASK BIT(16)
+#define XCSI_VCXR_VC11FSYNCERR_MASK BIT(15)
+#define XCSI_VCXR_VC11FLVLERR_MASK BIT(14)
+#define XCSI_VCXR_VC10FSYNCERR_MASK BIT(13)
+#define XCSI_VCXR_VC10FLVLERR_MASK BIT(12)
+#define XCSI_VCXR_VC9FSYNCERR_MASK BIT(11)
+#define XCSI_VCXR_VC9FLVLERR_MASK BIT(10)
+#define XCSI_VCXR_VC8FSYNCERR_MASK BIT(9)
+#define XCSI_VCXR_VC8FLVLERR_MASK BIT(8)
+#define XCSI_VCXR_VC7FSYNCERR_MASK BIT(7)
+#define XCSI_VCXR_VC7FLVLERR_MASK BIT(6)
+#define XCSI_VCXR_VC6FSYNCERR_MASK BIT(5)
+#define XCSI_VCXR_VC6FLVLERR_MASK BIT(4)
+#define XCSI_VCXR_VC5FSYNCERR_MASK BIT(3)
+#define XCSI_VCXR_VC5FLVLERR_MASK BIT(2)
+#define XCSI_VCXR_VC4FSYNCERR_MASK BIT(1)
+#define XCSI_VCXR_VC4FLVLERR_MASK BIT(0)
+#define XCSI_VCXR_MASK 0x00FFFFFF
+
+#define XCSI_CLKINFR_OFFSET 0x0000003C
+#define XCSI_CLKINFR_STOP_SHIFT 1
+#define XCSI_CLKINFR_STOP_MASK BIT(XCSI_CLKINFR_STOP_SHIFT)
+
+#define XCSI_L0INFR_OFFSET 0x00000040
+#define XCSI_L1INFR_OFFSET 0x00000044
+#define XCSI_L2INFR_OFFSET 0x00000048
+#define XCSI_L3INFR_OFFSET 0x0000004C
+#define XCSI_LXINFR_STOP_SHIFT 5
+#define XCSI_LXINFR_SOTERR_SHIFT 1
+#define XCSI_LXINFR_SOTSYNCERR_SHIFT 0
+#define XCSI_LXINFR_STOP_MASK BIT(XCSI_LXINFR_STOP_SHIFT)
+#define XCSI_LXINFR_SOTERR_MASK BIT(XCSI_LXINFR_SOTERR_SHIFT)
+#define XCSI_LXINFR_SOTSYNCERR_MASK BIT(XCSI_LXINFR_SOTSYNCERR_SHIFT)
+
+#define XCSI_VC0INF1R_OFFSET 0x00000060
+#define XCSI_VC1INF1R_OFFSET 0x00000068
+#define XCSI_VC2INF1R_OFFSET 0x00000070
+#define XCSI_VC3INF1R_OFFSET 0x00000078
+#define XCSI_VC4INF1R_OFFSET 0x00000080
+#define XCSI_VC5INF1R_OFFSET 0x00000088
+#define XCSI_VC6INF1R_OFFSET 0x00000090
+#define XCSI_VC7INF1R_OFFSET 0x00000098
+#define XCSI_VC8INF1R_OFFSET 0x000000A0
+#define XCSI_VC9INF1R_OFFSET 0x000000A8
+#define XCSI_VC10INF1R_OFFSET 0x000000B0
+#define XCSI_VC11INF1R_OFFSET 0x000000B8
+#define XCSI_VC12INF1R_OFFSET 0x000000C0
+#define XCSI_VC13INF1R_OFFSET 0x000000C8
+#define XCSI_VC14INF1R_OFFSET 0x000000D0
+#define XCSI_VC15INF1R_OFFSET 0x000000D8
+#define XCSI_VCXINF1R_LINECOUNT_SHIFT 16
+#define XCSI_VCXINF1R_BYTECOUNT_SHIFT 0
+#define XCSI_VCXINF1R_LINECOUNT_MASK 0xFFFF0000
+#define XCSI_VCXINF1R_BYTECOUNT_MASK 0x0000FFFF
+
+#define XCSI_VC0INF2R_OFFSET 0x00000064
+#define XCSI_VC1INF2R_OFFSET 0x0000006C
+#define XCSI_VC2INF2R_OFFSET 0x00000074
+#define XCSI_VC3INF2R_OFFSET 0x0000007C
+#define XCSI_VC4INF2R_OFFSET 0x00000084
+#define XCSI_VC5INF2R_OFFSET 0x0000008C
+#define XCSI_VC6INF2R_OFFSET 0x00000094
+#define XCSI_VC7INF2R_OFFSET 0x0000009C
+#define XCSI_VC8INF2R_OFFSET 0x000000A4
+#define XCSI_VC9INF2R_OFFSET 0x000000AC
+#define XCSI_VC10INF2R_OFFSET 0x000000B4
+#define XCSI_VC11INF2R_OFFSET 0x000000BC
+#define XCSI_VC12INF2R_OFFSET 0x000000C4
+#define XCSI_VC13INF2R_OFFSET 0x000000CC
+#define XCSI_VC14INF2R_OFFSET 0x000000D4
+#define XCSI_VC15INF2R_OFFSET 0x000000DC
+#define XCSI_VCXINF2R_DATATYPE_SHIFT 0
+#define XCSI_VCXINF2R_DATATYPE_MASK 0x0000003F
+
+#define XDPHY_CTRLREG_OFFSET 0x0
+#define XDPHY_CTRLREG_DPHYEN_SHIFT 1
+#define XDPHY_CTRLREG_DPHYEN_MASK BIT(XDPHY_CTRLREG_DPHYEN_SHIFT)
+
+#define XDPHY_CLKSTATREG_OFFSET 0x18
+#define XDPHY_CLKSTATREG_MODE_SHIFT 0
+#define XDPHY_CLKSTATREG_MODE_MASK 0x3
+#define XDPHY_LOW_POWER_MODE 0x0
+#define XDPHY_HI_SPEED_MODE 0x1
+#define XDPHY_ESC_MODE 0x2
+
+/*
+ * Interrupt mask: all interrupt sources except the STOP interrupt
+ */
+#define XCSI_INTR_MASK (XCSI_ISR_ALLINTR_MASK & ~XCSI_ISR_STOP_MASK)
+/*
+ * Timeout for reset
+ */
+#define XCSI_TIMEOUT_VAL (1000) /* us */
+
+/*
+ * Max string length for CSI Data type string
+ */
+#define MAX_XIL_CSIDT_STR_LENGTH 64
+
+/*
+ * Maximum number of short packet events per file handle.
+ */
+#define XCSI_MAX_SPKT (512)
+
+/* Number of media pads */
+#define XILINX_CSI_MEDIA_PADS (2)
+
+#define XCSI_DEFAULT_WIDTH (1920)
+#define XCSI_DEFAULT_HEIGHT (1080)
+
+#define XCSI_DPHY_CLK_MIN 197000000000UL
+#define XCSI_DPHY_CLK_MAX 203000000000UL
+#define XCSI_DPHY_CLK_REQ 200000000000UL
+
+/*
+ * Macro to return "true" or "false" string if bit is set
+ */
+#define XCSI_GET_BITSET_STR(val, mask) (((val) & (mask)) ? "true" : "false")
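+/* e.g. XCSI_GET_BITSET_STR(csr, XCSI_CSR_SLBF_MASK) yields "true" or "false" */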
+
+#define XCSI_CLK_PROP BIT(0)
+
+/**
+ * struct xcsi2rxss_feature - dt or IP property structure
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xcsi2rxss_feature {
+ u32 flags;
+};
+
+enum CSI_DataTypes {
+ MIPI_CSI_DT_FRAME_START_CODE = 0x00,
+ MIPI_CSI_DT_FRAME_END_CODE,
+ MIPI_CSI_DT_LINE_START_CODE,
+ MIPI_CSI_DT_LINE_END_CODE,
+ MIPI_CSI_DT_SYNC_RSVD_04,
+ MIPI_CSI_DT_SYNC_RSVD_05,
+ MIPI_CSI_DT_SYNC_RSVD_06,
+ MIPI_CSI_DT_SYNC_RSVD_07,
+ MIPI_CSI_DT_GSPKT_08,
+ MIPI_CSI_DT_GSPKT_09,
+ MIPI_CSI_DT_GSPKT_0A,
+ MIPI_CSI_DT_GSPKT_0B,
+ MIPI_CSI_DT_GSPKT_0C,
+ MIPI_CSI_DT_GSPKT_0D,
+ MIPI_CSI_DT_GSPKT_0E,
+ MIPI_CSI_DT_GSPKT_0F,
+ MIPI_CSI_DT_GLPKT_10,
+ MIPI_CSI_DT_GLPKT_11,
+ MIPI_CSI_DT_GLPKT_12,
+ MIPI_CSI_DT_GLPKT_13,
+ MIPI_CSI_DT_GLPKT_14,
+ MIPI_CSI_DT_GLPKT_15,
+ MIPI_CSI_DT_GLPKT_16,
+ MIPI_CSI_DT_GLPKT_17,
+ MIPI_CSI_DT_YUV_420_8B,
+ MIPI_CSI_DT_YUV_420_10B,
+ MIPI_CSI_DT_YUV_420_8B_LEGACY,
+ MIPI_CSI_DT_YUV_RSVD,
+ MIPI_CSI_DT_YUV_420_8B_CSPS,
+ MIPI_CSI_DT_YUV_420_10B_CSPS,
+ MIPI_CSI_DT_YUV_422_8B,
+ MIPI_CSI_DT_YUV_422_10B,
+ MIPI_CSI_DT_RGB_444,
+ MIPI_CSI_DT_RGB_555,
+ MIPI_CSI_DT_RGB_565,
+ MIPI_CSI_DT_RGB_666,
+ MIPI_CSI_DT_RGB_888,
+ MIPI_CSI_DT_RGB_RSVD_25,
+ MIPI_CSI_DT_RGB_RSVD_26,
+ MIPI_CSI_DT_RGB_RSVD_27,
+ MIPI_CSI_DT_RAW_6,
+ MIPI_CSI_DT_RAW_7,
+ MIPI_CSI_DT_RAW_8,
+ MIPI_CSI_DT_RAW_10,
+ MIPI_CSI_DT_RAW_12,
+ MIPI_CSI_DT_RAW_14,
+ MIPI_CSI_DT_RAW_16,
+ MIPI_CSI_DT_RAW_20,
+ MIPI_CSI_DT_USER_30,
+ MIPI_CSI_DT_USER_31,
+ MIPI_CSI_DT_USER_32,
+ MIPI_CSI_DT_USER_33,
+ MIPI_CSI_DT_USER_34,
+ MIPI_CSI_DT_USER_35,
+ MIPI_CSI_DT_USER_36,
+ MIPI_CSI_DT_USER_37,
+ MIPI_CSI_DT_RSVD_38,
+ MIPI_CSI_DT_RSVD_39,
+ MIPI_CSI_DT_RSVD_3A,
+ MIPI_CSI_DT_RSVD_3B,
+ MIPI_CSI_DT_RSVD_3C,
+ MIPI_CSI_DT_RSVD_3D,
+ MIPI_CSI_DT_RSVD_3E,
+ MIPI_CSI_DT_RSVD_3F
+};
+
+/**
+ * struct pixel_format - Data type to string name mapping
+ * @dt: MIPI CSI-2 data type
+ * @dt_str: String name of the data type
+ */
+struct pixel_format {
+	enum CSI_DataTypes dt;
+	char dt_str[MAX_XIL_CSIDT_STR_LENGTH];
+};
+
+/**
+ * struct xcsi2rxss_event - Event log structure
+ * @mask: Event mask
+ * @name: Name of the event
+ * @counter: Count number of events
+ */
+struct xcsi2rxss_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+/**
+ * struct xcsi2rxss_core - CSI-2 Rx Subsystem core configuration structure
+ * @dev: Platform structure
+ * @iomem: Base address of subsystem
+ * @irq: requested irq number
+ * @dphy_offset: DPHY registers offset
+ * @dphy_present: Flag for DPHY register interface presence
+ * @enable_active_lanes: If number of active lanes can be modified
+ * @max_num_lanes: Maximum number of lanes present
+ * @vfb: Video Format Bridge enabled or not
+ * @ppc: pixels per clock
+ * @vc: Virtual Channel
+ * @axis_tdata_width: AXI Stream data width
+ * @datatype: Data type filter
+ * @pxlformat: String with CSI pixel format from IP
+ * @num_lanes: Number of lanes requested from application
+ * @events: Structure to maintain event logs
+ * @vcx_events: Structure to maintain VCX event logs
+ * @en_vcx: If more than 4 VC are enabled.
+ * @cfg: Pointer to csi2rxss config structure
+ * @lite_aclk: AXI4-Lite interface clock
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock
+ */
+struct xcsi2rxss_core {
+ struct device *dev;
+ void __iomem *iomem;
+ int irq;
+ u32 dphy_offset;
+ bool dphy_present;
+ bool enable_active_lanes;
+ u32 max_num_lanes;
+ bool vfb;
+ u32 ppc;
+ u32 vc;
+ u32 axis_tdata_width;
+ u32 datatype;
+ const char *pxlformat;
+ u32 num_lanes;
+ struct xcsi2rxss_event *events;
+ struct xcsi2rxss_event *vcx_events;
+ bool en_vcx;
+ const struct xcsi2rxss_feature *cfg;
+ struct clk *lite_aclk;
+ struct clk *video_aclk;
+ struct clk *dphy_clk_200M;
+};
+
+/**
+ * struct xcsi2rxss_state - CSI2 Rx Subsystem device structure
+ * @core: Core structure for MIPI CSI2 Rx Subsystem
+ * @subdev: The v4l2 subdev structure
+ * @ctrl_handler: control handler
+ * @formats: Active V4L2 formats on each pad
+ * @default_format: default V4L2 media bus format
+ * @vip_format: format information corresponding to the active format
+ * @event: Holds the short packet event
+ * @lock: mutex for serializing operations
+ * @pads: media pads
+ * @npads: number of pads
+ * @streaming: Flag for storing streaming state
+ * @suspended: Flag for storing suspended state
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xcsi2rxss_state {
+ struct xcsi2rxss_core core;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_format;
+ const struct xvip_video_format *vip_format;
+ struct v4l2_event event;
+ struct mutex lock;
+ struct media_pad pads[XILINX_CSI_MEDIA_PADS];
+ unsigned int npads;
+ bool streaming;
+ bool suspended;
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v4_0 = {
+ .flags = XCSI_CLK_PROP,
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v2_0 = {
+ .flags = 0,
+};
+
+static const struct of_device_id xcsi2rxss_of_id_table[] = {
+ { .compatible = "xlnx,mipi-csi2-rx-subsystem-2.0",
+ .data = &xlnx_csi2rxss_v2_0 },
+ { .compatible = "xlnx,mipi-csi2-rx-subsystem-3.0",
+ .data = &xlnx_csi2rxss_v2_0 },
+ { .compatible = "xlnx,mipi-csi2-rx-subsystem-4.0",
+ .data = &xlnx_csi2rxss_v4_0 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcsi2rxss_of_id_table);
+
+static inline struct xcsi2rxss_state *
+to_xcsi2rxssstate(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcsi2rxss_state, subdev);
+}
+
+/* Register access helpers */
+static inline u32 xcsi2rxss_read(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr)
+{
+ return ioread32(xcsi2rxss->iomem + addr);
+}
+
+static inline void xcsi2rxss_write(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr,
+ u32 value)
+{
+ iowrite32(value, xcsi2rxss->iomem + addr);
+}
+
+static inline void xcsi2rxss_clr(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr,
+ u32 clr)
+{
+ xcsi2rxss_write(xcsi2rxss,
+ addr,
+ xcsi2rxss_read(xcsi2rxss, addr) & ~clr);
+}
+
+static inline void xcsi2rxss_set(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr,
+ u32 set)
+{
+ xcsi2rxss_write(xcsi2rxss,
+ addr,
+ xcsi2rxss_read(xcsi2rxss, addr) | set);
+}
+
+static const struct pixel_format pixel_formats[] = {
+ { MIPI_CSI_DT_YUV_420_8B, "YUV420_8bit" },
+ { MIPI_CSI_DT_YUV_420_10B, "YUV420_10bit" },
+ { MIPI_CSI_DT_YUV_420_8B_LEGACY, "Legacy_YUV420_8bit" },
+ { MIPI_CSI_DT_YUV_420_8B_CSPS, "YUV420_8bit_CSPS" },
+ { MIPI_CSI_DT_YUV_420_10B_CSPS, "YUV420_10bit_CSPS" },
+ { MIPI_CSI_DT_YUV_422_8B, "YUV422_8bit" },
+ { MIPI_CSI_DT_YUV_422_10B, "YUV422_10bit" },
+ { MIPI_CSI_DT_RGB_444, "RGB444" },
+ { MIPI_CSI_DT_RGB_555, "RGB555" },
+ { MIPI_CSI_DT_RGB_565, "RGB565" },
+ { MIPI_CSI_DT_RGB_666, "RGB666" },
+ { MIPI_CSI_DT_RGB_888, "RGB888" },
+ { MIPI_CSI_DT_RAW_6, "RAW6" },
+ { MIPI_CSI_DT_RAW_7, "RAW7" },
+ { MIPI_CSI_DT_RAW_8, "RAW8" },
+ { MIPI_CSI_DT_RAW_10, "RAW10" },
+ { MIPI_CSI_DT_RAW_12, "RAW12" },
+ { MIPI_CSI_DT_RAW_14, "RAW14"},
+ { MIPI_CSI_DT_RAW_16, "RAW16"},
+ { MIPI_CSI_DT_RAW_20, "RAW20"}
+};
+
+static struct xcsi2rxss_event xcsi2rxss_events[] = {
+ { XCSI_ISR_FR_MASK, "Frame Received", 0 },
+ { XCSI_ISR_VCX_MASK, "VCX Frame Errors", 0 },
+ { XCSI_ISR_ILC_MASK, "Invalid Lane Count Error", 0 },
+ { XCSI_ISR_SPFIFOF_MASK, "Short Packet FIFO OverFlow Error", 0 },
+ { XCSI_ISR_SPFIFONE_MASK, "Short Packet FIFO Not Empty", 0 },
+ { XCSI_ISR_SLBF_MASK, "Streamline Buffer Full Error", 0 },
+ { XCSI_ISR_STOP_MASK, "Lane Stop State", 0 },
+ { XCSI_ISR_SOTERR_MASK, "SOT Error", 0 },
+ { XCSI_ISR_SOTSYNCERR_MASK, "SOT Sync Error", 0 },
+ { XCSI_ISR_ECC2BERR_MASK, "2 Bit ECC Unrecoverable Error", 0 },
+ { XCSI_ISR_ECC1BERR_MASK, "1 Bit ECC Recoverable Error", 0 },
+ { XCSI_ISR_CRCERR_MASK, "CRC Error", 0 },
+ { XCSI_ISR_DATAIDERR_MASK, "Data Id Error", 0 },
+ { XCSI_ISR_VC3FSYNCERR_MASK, "Virtual Channel 3 Frame Sync Error", 0 },
+ { XCSI_ISR_VC3FLVLERR_MASK, "Virtual Channel 3 Frame Level Error", 0 },
+ { XCSI_ISR_VC2FSYNCERR_MASK, "Virtual Channel 2 Frame Sync Error", 0 },
+ { XCSI_ISR_VC2FLVLERR_MASK, "Virtual Channel 2 Frame Level Error", 0 },
+ { XCSI_ISR_VC1FSYNCERR_MASK, "Virtual Channel 1 Frame Sync Error", 0 },
+ { XCSI_ISR_VC1FLVLERR_MASK, "Virtual Channel 1 Frame Level Error", 0 },
+ { XCSI_ISR_VC0FSYNCERR_MASK, "Virtual Channel 0 Frame Sync Error", 0 },
+ { XCSI_ISR_VC0FLVLERR_MASK, "Virtual Channel 0 Frame Level Error", 0 }
+};
+
+#define XMIPICSISS_NUM_EVENTS ARRAY_SIZE(xcsi2rxss_events)
+
+#define XMIPICSISS_VCX_START (4)
+#define XMIPICSISS_MAX_VC (4)
+#define XMIPICSISS_MAX_VCX (16)
+
+/* Each extended VC (4 to 15) has two events: frame sync and frame level error */
+#define XMIPICSISS_VCX_NUM_EVENTS ((XMIPICSISS_MAX_VCX -\
+ XMIPICSISS_MAX_VC) * 2)
+
+/**
+ * xcsi2rxss_clr_and_set - Clear and set the register with a bitmask
+ * @xcsi2rxss: Xilinx MIPI CSI2 Rx Subsystem subdev core struct
+ * @addr: address of register
+ * @clr: bitmask to be cleared
+ * @set: bitmask to be set
+ *
+ * Clear the bits given by mask @clr in the register at address @addr,
+ * then set the bits given by mask @set in the same register.
+ */
+static void xcsi2rxss_clr_and_set(struct xcsi2rxss_core *xcsi2rxss,
+ u32 addr, u32 clr, u32 set)
+{
+ u32 reg;
+
+ reg = xcsi2rxss_read(xcsi2rxss, addr);
+ reg &= ~clr;
+ reg |= set;
+ xcsi2rxss_write(xcsi2rxss, addr, reg);
+}
+
+/**
+ * xcsi2rxss_pxlfmtstrtodt - Convert a pixel format string from the device
+ * tree to a data type.
+ * @pxlfmtstr: String obtained while parsing the device tree node
+ *
+ * This function takes a CSI pixel format string obtained while parsing a
+ * device tree node and converts it to the corresponding data type.
+ *
+ * E.g. the "RAW8" string is converted to 0x2A.
+ * Refer to the MIPI CSI-2 specification for details.
+ *
+ * Return: Equivalent pixel format value from the table, or -EINVAL if the
+ * string doesn't match any supported format
+ */
+static u32 xcsi2rxss_pxlfmtstrtodt(const char *pxlfmtstr)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+		if (!strncmp(pixel_formats[i].dt_str, pxlfmtstr,
+			     MAX_XIL_CSIDT_STR_LENGTH))
+			return pixel_formats[i].dt;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * xcsi2rxss_pxlfmtdttostr - Convert a pixel format data type to a string.
+ * @datatype: MIPI CSI-2 Data Type
+ *
+ * This function takes a CSI pixel format data type and returns a
+ * pointer to its string name.
+ *
+ * E.g. 0x2A returns the "RAW8" string.
+ * Refer to the MIPI CSI-2 specification for details.
+ *
+ * Return: Equivalent pixel format string from the table, or NULL if not found
+ */
+static const char *xcsi2rxss_pxlfmtdttostr(u32 datatype)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+		if (pixel_formats[i].dt == datatype)
+			return pixel_formats[i].dt_str;
+	}
+
+	return NULL;
+}
+
+/**
+ * xcsi2rxss_enable - Enable or disable the CSI Core
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ * @flag: true for enabling, false for disabling
+ *
+ * This function enables/disables the MIPI CSI2 Rx Subsystem core.
+ * After enabling the CSI2 Rx core, the DPHY is enabled in case the
+ * register interface for it is present.
+ */
+static void xcsi2rxss_enable(struct xcsi2rxss_core *core, bool flag)
+{
+	u32 dphy_ctrl_off = core->dphy_offset + XDPHY_CTRLREG_OFFSET;
+
+	if (flag) {
+		xcsi2rxss_write(core, XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+		if (core->dphy_present)
+			xcsi2rxss_write(core, dphy_ctrl_off,
+					XDPHY_CTRLREG_DPHYEN_MASK);
+	} else {
+		xcsi2rxss_write(core, XCSI_CCR_OFFSET, 0);
+		if (core->dphy_present)
+			xcsi2rxss_write(core, dphy_ctrl_off, 0);
+	}
+}
+
+/**
+ * xcsi2rxss_interrupts_enable - Enable or disable CSI interrupts
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ * @flag: true for enabling, false for disabling
+ *
+ * This function enables/disables the interrupts for the MIPI CSI2
+ * Rx Subsystem.
+ */
+static void xcsi2rxss_interrupts_enable(struct xcsi2rxss_core *core, bool flag)
+{
+ if (flag) {
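+		/*
+		 * Drop the global enable first so reprogramming the IER
+		 * cannot race with an in-flight interrupt, then re-enable it.
+		 */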
+ xcsi2rxss_clr(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+ xcsi2rxss_write(core, XCSI_IER_OFFSET, XCSI_INTR_MASK);
+ xcsi2rxss_set(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+ } else {
+ xcsi2rxss_clr(core, XCSI_IER_OFFSET, XCSI_INTR_MASK);
+ xcsi2rxss_clr(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+ }
+}
+
+/**
+ * xcsi2rxss_reset - Does a soft reset of the MIPI CSI2 Rx Subsystem
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ *
+ * Return: 0 - on success OR -ETIME if reset times out
+ */
+static int xcsi2rxss_reset(struct xcsi2rxss_core *core)
+{
+	u32 timeout = XCSI_TIMEOUT_VAL;
+
+	xcsi2rxss_set(core, XCSI_CCR_OFFSET, XCSI_CCR_SOFTRESET_MASK);
+
+	while (xcsi2rxss_read(core, XCSI_CSR_OFFSET) & XCSI_CSR_RIPCD_MASK) {
+		if (timeout == 0) {
+			dev_err(core->dev, "Xilinx CSI2 Rx Subsystem Soft Reset Timeout!\n");
+			return -ETIME;
+		}
+
+		timeout--;
+		udelay(1);
+	}
+
+	xcsi2rxss_clr(core, XCSI_CCR_OFFSET, XCSI_CCR_SOFTRESET_MASK);
+	return 0;
+}
+
+/**
+ * xcsi2rxss_irq_handler - Interrupt handler for CSI-2
+ * @irq: IRQ number
+ * @dev_id: Pointer to device state
+ *
+ * In the interrupt handler, the event counters corresponding to the
+ * asserted interrupts are updated; this is useful for status reporting
+ * and debugging. If the short packet FIFO not-empty or overflow
+ * interrupt is received, the short packet is captured and a V4L2 event
+ * is generated.
+ *
+ * Return: IRQ_HANDLED after handling interrupts
+ */
+static irqreturn_t xcsi2rxss_irq_handler(int irq, void *dev_id)
+{
+ struct xcsi2rxss_state *state = (struct xcsi2rxss_state *)dev_id;
+ struct xcsi2rxss_core *core = &state->core;
+ u32 status;
+
+ status = xcsi2rxss_read(core, XCSI_ISR_OFFSET) & XCSI_INTR_MASK;
+ dev_dbg(core->dev, "interrupt status = 0x%08x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XCSI_ISR_SPFIFONE_MASK) {
+
+ memset(&state->event, 0, sizeof(state->event));
+
+ state->event.type = V4L2_EVENT_XLNXCSIRX_SPKT;
+
+ *((u32 *)(&state->event.u.data)) =
+ xcsi2rxss_read(core, XCSI_SPKTR_OFFSET);
+
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XCSI_ISR_SPFIFOF_MASK) {
+ dev_alert(core->dev, "Short packet FIFO overflowed\n");
+
+ memset(&state->event, 0, sizeof(state->event));
+
+ state->event.type = V4L2_EVENT_XLNXCSIRX_SPKT_OVF;
+
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XCSI_ISR_SLBF_MASK) {
+ dev_alert(core->dev, "Stream Line Buffer Full!\n");
+
+ memset(&state->event, 0, sizeof(state->event));
+
+ state->event.type = V4L2_EVENT_XLNXCSIRX_SLBF;
+
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XCSI_ISR_ALLINTR_MASK) {
+ unsigned int i;
+
+ for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++) {
+ if (!(status & core->events[i].mask))
+ continue;
+ core->events[i].counter++;
+ dev_dbg(core->dev, "%s: %d\n", core->events[i].name,
+ core->events[i].counter);
+ }
+
+ if (status & XCSI_ISR_VCX_MASK && core->en_vcx) {
+ u32 vcxstatus;
+
+ vcxstatus = xcsi2rxss_read(core, XCSI_VCXR_OFFSET);
+ vcxstatus &= XCSI_VCXR_MASK;
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++) {
+ if (!(vcxstatus & core->vcx_events[i].mask))
+ continue;
+ core->vcx_events[i].counter++;
+ }
+ xcsi2rxss_write(core, XCSI_VCXR_OFFSET, vcxstatus);
+ }
+ }
+
+ xcsi2rxss_write(core, XCSI_ISR_OFFSET, status);
+
+ return IRQ_HANDLED;
+}
+
+static void xcsi2rxss_reset_event_counters(struct xcsi2rxss_state *state)
+{
+ int i;
+
+ for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++)
+ state->core.events[i].counter = 0;
+
+ if (!state->core.en_vcx)
+ return;
+
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++)
+ state->core.vcx_events[i].counter = 0;
+}
+
+/**
+ * xcsi2rxss_log_counters - Print out the event counters.
+ * @state: Pointer to device state
+ */
+static void xcsi2rxss_log_counters(struct xcsi2rxss_state *state)
+{
+ int i;
+
+ for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++) {
+ if (state->core.events[i].counter > 0)
+ v4l2_info(&state->subdev, "%s events: %d\n",
+ state->core.events[i].name,
+ state->core.events[i].counter);
+ }
+
+ if (!state->core.en_vcx)
+ return;
+
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++) {
+ if (state->core.vcx_events[i].counter > 0)
+ v4l2_info(&state->subdev,
+ "VC %d Frame %s error vcx events: %d\n",
+ (i / 2) + XMIPICSISS_VCX_START,
+ i & 1 ? "Sync" : "Level",
+ state->core.vcx_events[i].counter);
+ }
+}
+
+/**
+ * xcsi2rxss_log_status - Logs the status of the CSI-2 Receiver
+ * @sd: Pointer to V4L2 subdevice structure
+ *
+ * This function prints the current status of the Xilinx MIPI CSI-2 Rx
+ * Subsystem.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_log_status(struct v4l2_subdev *sd)
+{
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ u32 reg, data, i, max_vc;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ xcsi2rxss_log_counters(xcsi2rxss);
+
+ v4l2_info(sd, "***** Core Status *****\n");
+ data = xcsi2rxss_read(core, XCSI_CSR_OFFSET);
+ v4l2_info(sd, "Short Packet FIFO Full = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_SPFIFOFULL_MASK));
+ v4l2_info(sd, "Short Packet FIFO Not Empty = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_SPFIFONE_MASK));
+ v4l2_info(sd, "Stream line buffer full = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_SLBF_MASK));
+ v4l2_info(sd, "Soft reset/Core disable in progress = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CSR_RIPCD_MASK));
+
+ /* Clk & Lane Info */
+ v4l2_info(sd, "******** Clock Lane Info *********\n");
+ data = xcsi2rxss_read(core, XCSI_CLKINFR_OFFSET);
+ v4l2_info(sd, "Clock Lane in Stop State = %s\n",
+ XCSI_GET_BITSET_STR(data, XCSI_CLKINFR_STOP_MASK));
+
+ v4l2_info(sd, "******** Data Lane Info *********\n");
+ v4l2_info(sd, "Lane\tSoT Error\tSoT Sync Error\tStop State\n");
+ reg = XCSI_L0INFR_OFFSET;
+ for (i = 0; i < 4; i++) {
+ data = xcsi2rxss_read(core, reg);
+
+ v4l2_info(sd, "%d\t%s\t\t%s\t\t%s\n",
+ i,
+ XCSI_GET_BITSET_STR(data, XCSI_LXINFR_SOTERR_MASK),
+ XCSI_GET_BITSET_STR(data, XCSI_LXINFR_SOTSYNCERR_MASK),
+ XCSI_GET_BITSET_STR(data, XCSI_LXINFR_STOP_MASK));
+
+ reg += 4;
+ }
+
+ /* Virtual Channel Image Information */
+ v4l2_info(sd, "********** Virtual Channel Info ************\n");
+ v4l2_info(sd, "VC\tLine Count\tByte Count\tData Type\n");
+ if (core->en_vcx)
+ max_vc = XMIPICSISS_MAX_VCX;
+ else
+ max_vc = XMIPICSISS_MAX_VC;
+
+ reg = XCSI_VC0INF1R_OFFSET;
+ for (i = 0; i < max_vc; i++) {
+		u32 line_count, byte_count, data_type;
+		const char *datatypestr;
+
+ /* Get line and byte count from VCXINFR1 Register */
+ data = xcsi2rxss_read(core, reg);
+ byte_count = (data & XCSI_VCXINF1R_BYTECOUNT_MASK) >>
+ XCSI_VCXINF1R_BYTECOUNT_SHIFT;
+ line_count = (data & XCSI_VCXINF1R_LINECOUNT_MASK) >>
+ XCSI_VCXINF1R_LINECOUNT_SHIFT;
+
+ /* Get data type from VCXINFR2 Register */
+ reg += 4;
+ data = xcsi2rxss_read(core, reg);
+ data_type = (data & XCSI_VCXINF2R_DATATYPE_MASK) >>
+ XCSI_VCXINF2R_DATATYPE_SHIFT;
+		datatypestr = xcsi2rxss_pxlfmtdttostr(data_type);
+
+ v4l2_info(sd, "%d\t%d\t\t%d\t\t%s\n",
+ i, line_count, byte_count, datatypestr);
+
+ /* Move to next pair of VC Info registers */
+ reg += 4;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_subscribe_event - Subscribe to the custom short packet
+ * receive event.
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 File Handle
+ * @sub: Subscribe event structure
+ *
+ * There are two types of events to be subscribed.
+ *
+ * First is to register for receiving a short packet.
+ * The short packets received are queued up in a FIFO.
+ * On reception of a short packet, an event will be generated
+ * with the short packet contents copied to its data area.
+ * Application subscribed to this event will poll for POLLPRI.
+ * On getting the event, the app dequeues the event to get the short packet
+ * data.
+ *
+ * Second is to register for Short packet FIFO overflow
+ * In case the rate of receiving short packets is high and
+ * the short packet FIFO overflows, this event will be triggered.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXCSIRX_SPKT:
+ case V4L2_EVENT_XLNXCSIRX_SPKT_OVF:
+ case V4L2_EVENT_XLNXCSIRX_SLBF:
+ ret = v4l2_event_subscribe(fh, sub, XCSI_MAX_SPKT, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
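+
+/*
+ * Editorial sketch (not part of the driver): a minimal user-space flow
+ * for the short packet event described above. The subdev node path is an
+ * assumption and depends on the media graph.
+ *
+ *	int fd = open("/dev/v4l-subdev0", O_RDWR);
+ *	struct v4l2_event_subscription sub = {
+ *		.type = V4L2_EVENT_XLNXCSIRX_SPKT,
+ *	};
+ *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
+ *	struct v4l2_event ev;
+ *
+ *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ *	poll(&pfd, 1, -1);		wait for POLLPRI
+ *	ioctl(fd, VIDIOC_DQEVENT, &ev);	ev.u.data holds the short packet
+ */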
+
+/**
+ * xcsi2rxss_unsubscribe_event - Unsubscribe from all events registered
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 file handle
+ * @sub: pointer to Event unsubscription structure
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int xcsi2rxss_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+ ret = v4l2_event_unsubscribe(fh, sub);
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_s_ctrl - This is used to set the Xilinx MIPI CSI-2 V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the Xilinx MIPI
+ * CSI-2 Rx Subsystem. It is used to set the active lanes in the system.
+ * The event counters can be reset.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+	u32 timeout = XCSI_TIMEOUT_VAL;
+ u32 active_lanes = 1;
+
+ struct xcsi2rxss_state *xcsi2rxss =
+ container_of(ctrl->handler,
+ struct xcsi2rxss_state, ctrl_handler);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_MIPICSISS_ACT_LANES:
+ /*
+ * This will be called only when "Enable Active Lanes" parameter
+ * is set in design
+ */
+ xcsi2rxss_clr_and_set(core, XCSI_PCR_OFFSET,
+ XCSI_PCR_ACTLANES_MASK, ctrl->val - 1);
+
+ /*
+ * If the core is enabled, wait for active lanes to be
+ * set.
+ *
+ * If core is disabled or there is no clock from DPHY Tx
+ * then the read back won't reflect the updated value
+ * as the PPI clock will not be present.
+ */
+
+ if (core->dphy_present) {
+ u32 dphyclkstatregoffset = core->dphy_offset +
+ XDPHY_CLKSTATREG_OFFSET;
+
+ u32 dphyclkstat =
+ xcsi2rxss_read(core, dphyclkstatregoffset) &
+ XDPHY_CLKSTATREG_MODE_MASK;
+
+ u32 coreenable =
+ xcsi2rxss_read(core, XCSI_CCR_OFFSET) &
+ XCSI_CCR_COREENB_MASK;
+
+			const char *modestr;
+
+			switch (dphyclkstat) {
+			case XDPHY_LOW_POWER_MODE:
+				modestr = "Low Power";
+				break;
+			case XDPHY_HI_SPEED_MODE:
+				modestr = "High Speed";
+				break;
+			case XDPHY_ESC_MODE:
+				modestr = "Escape";
+				break;
+			default:
+				modestr = "Unknown";
+				break;
+			}
+
+			dev_dbg(core->dev, "DPHY Clock Lane in %s mode\n",
+				modestr);
+
+ if ((dphyclkstat == XDPHY_HI_SPEED_MODE) &&
+ coreenable) {
+
+				/* Wait for the core to apply the new active lanes */
+				while (timeout--)
+					udelay(1);
+
+ active_lanes =
+ xcsi2rxss_read(core, XCSI_PCR_OFFSET);
+ active_lanes &= XCSI_PCR_ACTLANES_MASK;
+ active_lanes++;
+
+ if (active_lanes != ctrl->val) {
+ dev_err(core->dev, "Failed to set active lanes!\n");
+ ret = -EAGAIN;
+ }
+ }
+ } else {
+ dev_dbg(core->dev, "No read back as no DPHY present.\n");
+ }
+
+ dev_dbg(core->dev, "Set active lanes: requested = %d, active = %d\n",
+ ctrl->val, active_lanes);
+ break;
+ case V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS:
+ xcsi2rxss_reset_event_counters(xcsi2rxss);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_g_volatile_ctrl - get the Xilinx MIPI CSI-2 Rx controls
+ * @ctrl: Pointer to V4L2 control
+ *
+ * This is used to get the number of frames received by the Xilinx
+ * MIPI CSI-2 Rx.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss =
+ container_of(ctrl->handler,
+ struct xcsi2rxss_state, ctrl_handler);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER:
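+		/* xcsi2rxss_events[0] is the "Frame Received" counter */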
+ ctrl->val = xcsi2rxss->core.events[0].counter;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+static int xcsi2rxss_start_stream(struct xcsi2rxss_state *xcsi2rxss)
+{
+ int ret;
+
+ xcsi2rxss_enable(&xcsi2rxss->core, true);
+
+ ret = xcsi2rxss_reset(&xcsi2rxss->core);
+ if (ret < 0)
+ return ret;
+
+ xcsi2rxss_interrupts_enable(&xcsi2rxss->core, true);
+
+ return 0;
+}
+
+static void xcsi2rxss_stop_stream(struct xcsi2rxss_state *xcsi2rxss)
+{
+ xcsi2rxss_interrupts_enable(&xcsi2rxss->core, false);
+ xcsi2rxss_enable(&xcsi2rxss->core, false);
+}
+
+/**
+ * xcsi2rxss_s_stream - Start or stop streaming
+ * @sd: V4L2 Sub device
+ * @enable: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * Xilinx MIPI CSI-2 Rx Subsystem provided the device isn't in
+ * suspended state.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ if (xcsi2rxss->suspended) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (enable) {
+ if (!xcsi2rxss->streaming) {
+ /* reset the event counters */
+ xcsi2rxss_reset_event_counters(xcsi2rxss);
+
+ ret = xcsi2rxss_start_stream(xcsi2rxss);
+ if (ret == 0)
+ xcsi2rxss->streaming = true;
+ }
+ } else {
+ if (xcsi2rxss->streaming) {
+ xcsi2rxss_stop_stream(xcsi2rxss);
+ xcsi2rxss->streaming = false;
+ }
+ }
+unlock:
+ mutex_unlock(&xcsi2rxss->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcsi2rxss->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcsi2rxss->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * xcsi2rxss_get_format - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+ fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ fmt->pad, fmt->which);
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_set_format - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ * Since the pad format is fixed in hardware, it can't be
+ * modified at run time. So when a format set is requested by the
+ * application, all parameters except the media bus code are
+ * saved for the pad and the original pad format code is sent
+ * back to the application.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *__format;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ u32 code;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+	/*
+	 * Only the format->code parameter matters for CSI as the
+	 * CSI format cannot be changed at runtime.
+	 * Ensure that the format to set is copied over to the CSI pad format.
+	 */
+ __format = __xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ fmt->pad, fmt->which);
+
+ /* Save the pad format code */
+ code = __format->code;
+
+	/*
+	 * If the bayer pattern to be set is SXXX8 then only the 1x8 types
+	 * are supported and the core's data type doesn't matter.
+	 * If the bayer pattern being set is SXXX10 then only the
+	 * 1x10 types are supported and the core must be configured for RAW10.
+	 * If the bayer pattern being set is SXXX12 then only the
+	 * 1x12 types are supported and the core must be configured for RAW12.
+	 * Likewise SXXX16 patterns require the core to be configured for
+	 * RAW16.
+	 *
+	 * Otherwise don't allow the change.
+	 */
+ if (((fmt->format.code == MEDIA_BUS_FMT_SBGGR8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB8_1X8))
+ || ((core->datatype == MIPI_CSI_DT_RAW_10) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB10_1X10)))
+ || ((core->datatype == MIPI_CSI_DT_RAW_12) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB12_1X12))) ||
+ ((core->datatype == MIPI_CSI_DT_RAW_16) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG16_1X16) ||
+	      (fmt->format.code == MEDIA_BUS_FMT_SRGGB16_1X16)))) {
+		/* Copy over the format to be set */
+		*__format = fmt->format;
+	} else {
+ /* Restore the original pad format code */
+ fmt->format.code = code;
+ __format->code = code;
+ __format->width = fmt->format.width;
+ __format->height = fmt->format.height;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
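+
+/*
+ * Editorial sketch: setting the active pad format from user space. Only
+ * bayer media bus codes matching the core's configured data type take
+ * effect (any 8-bit bayer code is always accepted); other codes are
+ * restored while width and height are stored as requested.
+ *
+ *	struct v4l2_subdev_format fmt = {
+ *		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ *		.pad = 0,
+ *	};
+ *
+ *	fmt.format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ *	fmt.format.width = 1920;
+ *	fmt.format.height = 1080;
+ *	ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
+ */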
+
+/**
+ * xcsi2rxss_open - Called on v4l2_open()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_open(). It sets the default format
+ * for both pads.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ *format = xcsi2rxss->default_format;
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 1);
+ *format = xcsi2rxss->default_format;
+
+ return 0;
+}
+
+static int xcsi2rxss_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcsi2rxss_media_ops = {
+ .link_validate = v4l2_subdev_link_validate
+};
+
+static const struct v4l2_ctrl_ops xcsi2rxss_ctrl_ops = {
+ .g_volatile_ctrl = xcsi2rxss_g_volatile_ctrl,
+ .s_ctrl = xcsi2rxss_s_ctrl
+};
+
+static struct v4l2_ctrl_config xcsi2rxss_ctrls[] = {
+ {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_ACT_LANES,
+ .name = "MIPI CSI2 Rx Subsystem: Active Lanes",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 4,
+ .step = 1,
+ .def = 1,
+ }, {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER,
+ .name = "MIPI CSI2 Rx Subsystem: Frames Received Counter",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS,
+ .name = "MIPI CSI2 Rx Subsystem: Reset Counters",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_WRITE_ONLY,
+ }
+};
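+
+/*
+ * Editorial sketch: driving these controls from user space through the
+ * subdev node (file descriptor fd is an assumption). Active lanes can
+ * only be changed when "Enable Active Lanes" was set in the design.
+ *
+ *	struct v4l2_control ctrl = {
+ *		.id = V4L2_CID_XILINX_MIPICSISS_ACT_LANES,
+ *		.value = 2,
+ *	};
+ *
+ *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ *	ctrl.id = V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER;
+ *	ioctl(fd, VIDIOC_G_CTRL, &ctrl);	frames received in ctrl.value
+ */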
+
+static const struct v4l2_subdev_core_ops xcsi2rxss_core_ops = {
+ .log_status = xcsi2rxss_log_status,
+ .subscribe_event = xcsi2rxss_subscribe_event,
+ .unsubscribe_event = xcsi2rxss_unsubscribe_event
+};
+
+static const struct v4l2_subdev_video_ops xcsi2rxss_video_ops = {
+	.s_stream = xcsi2rxss_s_stream
+};
+
+static const struct v4l2_subdev_pad_ops xcsi2rxss_pad_ops = {
+	.get_fmt = xcsi2rxss_get_format,
+	.set_fmt = xcsi2rxss_set_format,
+};
+
+static const struct v4l2_subdev_ops xcsi2rxss_ops = {
+	.core = &xcsi2rxss_core_ops,
+	.video = &xcsi2rxss_video_ops,
+	.pad = &xcsi2rxss_pad_ops
+};
+
+static const struct v4l2_subdev_internal_ops xcsi2rxss_internal_ops = {
+ .open = xcsi2rxss_open,
+ .close = xcsi2rxss_close
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+/**
+ * xcsi2rxss_pm_suspend - Function called on Power Suspend
+ * @dev: Pointer to device structure
+ *
+ * On power suspend the CSI-2 Core is disabled if the device isn't
+ * in suspended state and is streaming.
+ *
+ * Return: 0 on success
+ */
+static int __maybe_unused xcsi2rxss_pm_suspend(struct device *dev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = dev_get_drvdata(dev);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ if (!xcsi2rxss->suspended && xcsi2rxss->streaming)
+ xcsi2rxss_clr(&xcsi2rxss->core,
+ XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+
+ xcsi2rxss->suspended = true;
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_pm_resume - Function called on Power Resume
+ * @dev: Pointer to device structure
+ *
+ * On power resume the CSI-2 Core is enabled when it is in suspended state
+ * and prior to entering suspended state it was streaming.
+ *
+ * Return: 0 on success
+ */
+static int __maybe_unused xcsi2rxss_pm_resume(struct device *dev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = dev_get_drvdata(dev);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ if ((xcsi2rxss->suspended) && (xcsi2rxss->streaming))
+ xcsi2rxss_set(&xcsi2rxss->core,
+ XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+
+ xcsi2rxss->suspended = false;
+
+ mutex_unlock(&xcsi2rxss->lock);
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xcsi2rxss_parse_of(struct xcsi2rxss_state *xcsi2rxss)
+{
+ struct device_node *node = xcsi2rxss->core.dev->of_node;
+ struct device_node *ports = NULL;
+ struct device_node *port = NULL;
+ unsigned int nports = 0;
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ int ret;
+ bool iic_present;
+
+ if (core->cfg->flags & XCSI_CLK_PROP) {
+ core->lite_aclk = devm_clk_get(core->dev, "lite_aclk");
+ if (IS_ERR(core->lite_aclk)) {
+ ret = PTR_ERR(core->lite_aclk);
+ dev_err(core->dev, "failed to get lite_aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ core->video_aclk = devm_clk_get(core->dev, "video_aclk");
+ if (IS_ERR(core->video_aclk)) {
+ ret = PTR_ERR(core->video_aclk);
+ dev_err(core->dev, "failed to get video_aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ core->dphy_clk_200M = devm_clk_get(core->dev, "dphy_clk_200M");
+ if (IS_ERR(core->dphy_clk_200M)) {
+ ret = PTR_ERR(core->dphy_clk_200M);
+ dev_err(core->dev, "failed to get dphy_clk_200M (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ dev_info(core->dev, "assuming all required clocks are enabled!\n");
+ }
+
+ core->dphy_present = of_property_read_bool(node, "xlnx,dphy-present");
+ dev_dbg(core->dev, "DPHY present property = %s\n",
+ core->dphy_present ? "Present" : "Absent");
+
+ iic_present = of_property_read_bool(node, "xlnx,iic-present");
+ dev_dbg(core->dev, "IIC present property = %s\n",
+ iic_present ? "Present" : "Absent");
+
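+	/*
+	 * Note: the DPHY register space follows the CSI-2 controller in the
+	 * subsystem address map; when the optional IIC controller is present
+	 * it occupies the intermediate region, pushing the DPHY registers
+	 * further out (offsets inferred from the subsystem configuration).
+	 */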
+ if (core->dphy_present) {
+ if (iic_present)
+ core->dphy_offset = 0x20000;
+ else
+ core->dphy_offset = 0x10000;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-lanes",
+ &core->max_num_lanes);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,max-lanes property\n");
+ return ret;
+ }
+
+ if ((core->max_num_lanes > 4) || (core->max_num_lanes < 1)) {
+ dev_err(core->dev, "%d max lanes : invalid xlnx,max-lanes property\n",
+ core->max_num_lanes);
+ return -EINVAL;
+ }
+
+ core->en_vcx = of_property_read_bool(node, "xlnx,en-vcx");
+
+ ret = of_property_read_u32(node, "xlnx,vc", &core->vc);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,vc property\n");
+ return ret;
+ }
+ if ((core->vc > XMIPICSISS_MAX_VC && !core->en_vcx) ||
+ (core->vc > XMIPICSISS_MAX_VCX && core->en_vcx)) {
+ dev_err(core->dev, "invalid virtual channel property value.\n");
+ return -EINVAL;
+ }
+
+ core->enable_active_lanes =
+ of_property_read_bool(node, "xlnx,en-active-lanes");
+ dev_dbg(core->dev, "Enable active lanes property = %s\n",
+ core->enable_active_lanes ? "Present" : "Absent");
+
+ ret = of_property_read_string(node, "xlnx,csi-pxl-format",
+ &core->pxlformat);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,csi-pxl-format property\n");
+ return ret;
+ }
+
+ core->datatype = xcsi2rxss_pxlfmtstrtodt(core->pxlformat);
+ if ((core->datatype < MIPI_CSI_DT_YUV_420_8B) ||
+ (core->datatype > MIPI_CSI_DT_RAW_20)) {
+ dev_err(core->dev, "Invalid xlnx,csi-pxl-format string\n");
+ return -EINVAL;
+ }
+
+ core->vfb = of_property_read_bool(node, "xlnx,vfb");
+ dev_dbg(core->dev, "Video Format Bridge property = %s\n",
+ core->vfb ? "Present" : "Absent");
+
+ if (core->vfb) {
+ ret = of_property_read_u32(node, "xlnx,ppc", &core->ppc);
+ if ((ret < 0) || !((core->ppc == 1) ||
+ (core->ppc == 2) || (core->ppc == 4))) {
+ dev_err(core->dev, "Invalid xlnx,ppc property ret = %d ppc = %d\n",
+ ret, core->ppc);
+ return -EINVAL;
+ }
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ int ret;
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+ struct v4l2_fwnode_endpoint v4lendpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ /*
+ * Currently only a subset of VFB enabled formats present in
+ * xvip are supported in the driver.
+ *
+ * If the VFB is disabled, the pixels per clock don't matter.
+ * The data width is either 32 or 64 bit as selected in design.
+ *
+ * For e.g. If Data Type is RGB888, VFB is disabled and
+ * data width is 32 bits.
+ *
+ * Clk Cycle | Byte 0 | Byte 1 | Byte 2 | Byte 3
+ * -----------+----------+----------+----------+----------
+ * 1 | B0 | G0 | R0 | B1
+ * 2 | G1 | R1 | B2 | G2
+ * 3 | R2 | B3 | G3 | R3
+ */
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(core->dev, "invalid format in DT");
+ return PTR_ERR(format);
+ }
+
+ if (core->vfb &&
+ (format->vf_code != XVIP_VF_YUV_422) &&
+ (format->vf_code != XVIP_VF_RBG) &&
+ (format->vf_code != XVIP_VF_MONO_SENSOR)) {
+ dev_err(core->dev, "Invalid UG934 video format set.\n");
+ return -EINVAL;
+ }
+
+ /* Get and check the format description */
+ if (!xcsi2rxss->vip_format) {
+ xcsi2rxss->vip_format = format;
+ } else if (xcsi2rxss->vip_format != format) {
+ dev_err(core->dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(core->dev, "No port at\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &v4lendpoint);
+ if (ret) {
+ of_node_put(endpoint);
+ return ret;
+ }
+
+ of_node_put(endpoint);
+ dev_dbg(core->dev, "%s : port %d bus type = %d\n",
+ __func__, nports, v4lendpoint.bus_type);
+
+ if (v4lendpoint.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ dev_dbg(core->dev, "%s : base.port = %d base.id = %d\n",
+ __func__,
+ v4lendpoint.base.port,
+ v4lendpoint.base.id);
+
+ dev_dbg(core->dev, "%s : mipi number lanes = %d\n",
+ __func__,
+ v4lendpoint.bus.mipi_csi2.num_data_lanes);
+ } else {
+ dev_dbg(core->dev, "%s : Not a CSI2 bus\n", __func__);
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (nports != 2) {
+ dev_err(core->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+ xcsi2rxss->npads = nports;
+
+	/* Register the interrupt handler */
+ core->irq = irq_of_parse_and_map(node, 0);
+
+ ret = devm_request_irq(core->dev, core->irq, xcsi2rxss_irq_handler,
+ IRQF_SHARED, "xilinx-csi2rxss", xcsi2rxss);
+ if (ret) {
+ dev_err(core->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xcsi2rxss_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xcsi2rxss_state *xcsi2rxss;
+ struct resource *res;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ u32 i;
+ int ret;
+ int num_ctrls;
+
+ xcsi2rxss = devm_kzalloc(&pdev->dev, sizeof(*xcsi2rxss), GFP_KERNEL);
+ if (!xcsi2rxss)
+ return -ENOMEM;
+
+ mutex_init(&xcsi2rxss->lock);
+
+ xcsi2rxss->core.dev = &pdev->dev;
+
+ match = of_match_node(xcsi2rxss_of_id_table, node);
+ if (!match)
+ return -ENODEV;
+
+ xcsi2rxss->core.cfg = match->data;
+
+ ret = xcsi2rxss_parse_of(xcsi2rxss);
+ if (ret < 0)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xcsi2rxss->core.iomem = devm_ioremap_resource(xcsi2rxss->core.dev, res);
+ if (IS_ERR(xcsi2rxss->core.iomem))
+ return PTR_ERR(xcsi2rxss->core.iomem);
+
+ if (xcsi2rxss->core.cfg->flags & XCSI_CLK_PROP) {
+ unsigned long rate;
+
+ ret = clk_prepare_enable(xcsi2rxss->core.lite_aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable lite_aclk (%d)\n",
+ ret);
+ goto clk_err;
+ }
+
+ ret = clk_prepare_enable(xcsi2rxss->core.video_aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable video_aclk (%d)\n",
+ ret);
+ goto video_aclk_err;
+ }
+
+ ret = clk_prepare_enable(xcsi2rxss->core.dphy_clk_200M);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable dphy clk (%d)\n",
+ ret);
+ goto dphy_clk_err;
+ }
+
+ ret = clk_set_rate(xcsi2rxss->core.dphy_clk_200M,
+ XCSI_DPHY_CLK_REQ);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set dphy clk rate (%d)\n",
+ ret);
+
+ goto all_clk_err;
+ }
+
+ rate = clk_get_rate(xcsi2rxss->core.dphy_clk_200M);
+		if (rate < XCSI_DPHY_CLK_MIN || rate > XCSI_DPHY_CLK_MAX) {
+ dev_err(&pdev->dev, "Err DPHY Clock = %lu\n",
+ rate);
+ ret = -EINVAL;
+ goto all_clk_err;
+ }
+ }
+
+	/* Reset and initialize the core */
+	ret = xcsi2rxss_reset(&xcsi2rxss->core);
+	if (ret < 0)
+		goto all_clk_err;
+
+	xcsi2rxss->core.events = xcsi2rxss_events;
+
+ if (xcsi2rxss->core.en_vcx) {
+ u32 alloc_size;
+
+ alloc_size = sizeof(struct xcsi2rxss_event) *
+ XMIPICSISS_VCX_NUM_EVENTS;
+ xcsi2rxss->core.vcx_events = devm_kzalloc(&pdev->dev,
+ alloc_size,
+ GFP_KERNEL);
+ if (!xcsi2rxss->core.vcx_events) {
+ mutex_destroy(&xcsi2rxss->lock);
+ return -ENOMEM;
+ }
+
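+		/*
+		 * VCX status bits come in per-VC pairs starting at VC4: even
+		 * bits flag frame level errors, odd bits frame sync errors
+		 * (see the XCSI_VCXR_* masks above).
+		 */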
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++)
+ xcsi2rxss->core.vcx_events[i].mask = 1 << i;
+ }
+
+ /* Initialize V4L2 subdevice and media entity */
+ xcsi2rxss->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ xcsi2rxss->pads[1].flags = MEDIA_PAD_FL_SINK;
+
+ /* Initialize the default format */
+ memset(&xcsi2rxss->default_format, 0,
+ sizeof(xcsi2rxss->default_format));
+ xcsi2rxss->default_format.code = xcsi2rxss->vip_format->code;
+ xcsi2rxss->default_format.field = V4L2_FIELD_NONE;
+ xcsi2rxss->default_format.colorspace = V4L2_COLORSPACE_SRGB;
+ xcsi2rxss->default_format.width = XCSI_DEFAULT_WIDTH;
+ xcsi2rxss->default_format.height = XCSI_DEFAULT_HEIGHT;
+
+ xcsi2rxss->formats[0] = xcsi2rxss->default_format;
+ xcsi2rxss->formats[1] = xcsi2rxss->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcsi2rxss->subdev;
+ v4l2_subdev_init(subdev, &xcsi2rxss_ops);
+
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcsi2rxss_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ subdev->entity.ops = &xcsi2rxss_media_ops;
+
+ v4l2_set_subdevdata(subdev, xcsi2rxss);
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xcsi2rxss->pads);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * In case the Enable Active Lanes config parameter is not set,
+ * dynamic lane reconfiguration is not allowed.
+ * So V4L2_CID_XILINX_MIPICSISS_ACT_LANES ctrl will not be registered.
+ * Accordingly allocate the number of controls
+ */
+ num_ctrls = ARRAY_SIZE(xcsi2rxss_ctrls);
+
+ if (!xcsi2rxss->core.enable_active_lanes)
+ num_ctrls--;
+
+ dev_dbg(xcsi2rxss->core.dev, "# of ctrls = %d\n", num_ctrls);
+
+ v4l2_ctrl_handler_init(&xcsi2rxss->ctrl_handler, num_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(xcsi2rxss_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ if (xcsi2rxss_ctrls[i].id ==
+ V4L2_CID_XILINX_MIPICSISS_ACT_LANES) {
+
+ if (xcsi2rxss->core.enable_active_lanes) {
+ xcsi2rxss_ctrls[i].max =
+ xcsi2rxss->core.max_num_lanes;
+ } else {
+ /* Don't register control */
+ dev_dbg(xcsi2rxss->core.dev,
+ "Skip active lane control\n");
+ continue;
+ }
+ }
+
+ dev_dbg(xcsi2rxss->core.dev, "%d ctrl = 0x%x\n",
+ i, xcsi2rxss_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&xcsi2rxss->ctrl_handler,
+ &xcsi2rxss_ctrls[i], NULL);
+		if (!ctrl) {
+			dev_err(xcsi2rxss->core.dev, "Failed for %s ctrl\n",
+				xcsi2rxss_ctrls[i].name);
+			ret = -EINVAL;
+			goto error;
+		}
+ }
+
+ dev_dbg(xcsi2rxss->core.dev, "# v4l2 ctrls registered = %d\n", i - 1);
+
+ if (xcsi2rxss->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xcsi2rxss->ctrl_handler.error;
+ goto error;
+ }
+
+ subdev->ctrl_handler = &xcsi2rxss->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&xcsi2rxss->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xcsi2rxss);
+
+ dev_info(xcsi2rxss->core.dev, "Xilinx CSI2 Rx Subsystem device found!\n");
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ /* default states for streaming and suspend */
+ xcsi2rxss->streaming = false;
+ xcsi2rxss->suspended = false;
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xcsi2rxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&xcsi2rxss->lock);
+
+all_clk_err:
+ clk_disable_unprepare(xcsi2rxss->core.dphy_clk_200M);
+dphy_clk_err:
+ clk_disable_unprepare(xcsi2rxss->core.video_aclk);
+video_aclk_err:
+ clk_disable_unprepare(xcsi2rxss->core.lite_aclk);
+clk_err:
+ return ret;
+}
+
+static int xcsi2rxss_remove(struct platform_device *pdev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcsi2rxss->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcsi2rxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&xcsi2rxss->lock);
+ clk_disable_unprepare(xcsi2rxss->core.dphy_clk_200M);
+ clk_disable_unprepare(xcsi2rxss->core.video_aclk);
+ clk_disable_unprepare(xcsi2rxss->core.lite_aclk);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcsi2rxss_pm_ops,
+ xcsi2rxss_pm_suspend, xcsi2rxss_pm_resume);
+
+static struct platform_driver xcsi2rxss_driver = {
+ .driver = {
+ .name = "xilinx-csi2rxss",
+ .pm = &xcsi2rxss_pm_ops,
+ .of_match_table = xcsi2rxss_of_id_table,
+ },
+ .probe = xcsi2rxss_probe,
+ .remove = xcsi2rxss_remove,
+};
+
+module_platform_driver(xcsi2rxss_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vsagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx MIPI CSI2 Rx Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-demosaic.c b/drivers/media/platform/xilinx/xilinx-demosaic.c
new file mode 100644
index 000000000000..a519c2c9719b
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-demosaic.c
@@ -0,0 +1,418 @@
+/*
+ * Xilinx Video Demosaic IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XDEMOSAIC_AP_CTRL (0x00)
+#define XDEMOSAIC_WIDTH (0x10)
+#define XDEMOSAIC_HEIGHT (0x18)
+#define XDEMOSAIC_INPUT_BAYER_FORMAT (0x28)
+
+#define XDEMOSAIC_MIN_HEIGHT (64)
+#define XDEMOSAIC_MAX_HEIGHT (4320)
+#define XDEMOSAIC_DEF_HEIGHT (720)
+#define XDEMOSAIC_MIN_WIDTH (64)
+#define XDEMOSAIC_MAX_WIDTH (8192)
+#define XDEMOSAIC_DEF_WIDTH (1280)
+
+#define XDEMOSAIC_RESET_DEASSERT (0)
+#define XDEMOSAIC_RESET_ASSERT (1)
+#define XDEMOSAIC_START BIT(0)
+#define XDEMOSAIC_AUTO_RESTART BIT(7)
+#define XDEMOSAIC_STREAM_ON (XDEMOSAIC_AUTO_RESTART | XDEMOSAIC_START)
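+
+/*
+ * AP_CTRL follows the usual Vivado HLS block-level control layout: bit 0
+ * is ap_start and bit 7 is auto-restart, so once started the core keeps
+ * processing frames without per-frame software intervention.
+ */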
+
+enum xdmsc_bayer_format {
+ XDEMOSAIC_RGGB = 0,
+ XDEMOSAIC_GRBG,
+ XDEMOSAIC_GBRG,
+ XDEMOSAIC_BGGR,
+};
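+
+/*
+ * The bayer format value is written as-is to XDEMOSAIC_INPUT_BAYER_FORMAT,
+ * so the enum order must match the IP's register encoding.
+ */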
+
+struct xdmsc_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+
+ enum xdmsc_bayer_format bayer_fmt;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+static inline u32 xdmsc_read(struct xdmsc_dev *xdmsc, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xdmsc->xvip, reg);
+ dev_dbg(xdmsc->xvip.dev,
+ "Reading 0x%x from reg offset 0x%x", data, reg);
+ return data;
+}
+
+static inline void xdmsc_write(struct xdmsc_dev *xdmsc, u32 reg, u32 data)
+{
+ xvip_write(&xdmsc->xvip, reg, data);
+ dev_dbg(xdmsc->xvip.dev,
+ "Writing 0x%x to reg offset 0x%x", data, reg);
+#ifdef DEBUG
+ if (xdmsc_read(xdmsc, reg) != data)
+ dev_err(xdmsc->xvip.dev,
+ "Wrote 0x%x does not match read back", data);
+#endif
+}
+
+static inline struct xdmsc_dev *to_xdmsc(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xdmsc_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+__xdmsc_get_pad_format(struct xdmsc_dev *xdmsc,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xdmsc->xvip.subdev,
+ cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xdmsc->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xdmsc_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+
+ if (!enable) {
+ dev_dbg(xdmsc->xvip.dev, "%s : Off", __func__);
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_DEASSERT);
+ return 0;
+ }
+
+ xdmsc_write(xdmsc, XDEMOSAIC_WIDTH,
+ xdmsc->formats[XVIP_PAD_SINK].width);
+ xdmsc_write(xdmsc, XDEMOSAIC_HEIGHT,
+ xdmsc->formats[XVIP_PAD_SINK].height);
+ xdmsc_write(xdmsc, XDEMOSAIC_INPUT_BAYER_FORMAT, xdmsc->bayer_fmt);
+
+ /* Start Demosaic Video IP */
+ xdmsc_write(xdmsc, XDEMOSAIC_AP_CTRL, XDEMOSAIC_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xdmsc_video_ops = {
+ .s_stream = xdmsc_s_stream,
+};
+
+static int xdmsc_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+
+ fmt->format = *__xdmsc_get_pad_format(xdmsc, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static bool
+xdmsc_is_format_bayer(struct xdmsc_dev *xdmsc, u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_RGGB;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_GRBG;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_GBRG;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_BGGR;
+ break;
+ default:
+ dev_dbg(xdmsc->xvip.dev, "Unsupported format for Sink Pad");
+ return false;
+ }
+ return true;
+}
+
+static int xdmsc_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+ struct v4l2_mbus_framefmt *__format;
+
+ __format = __xdmsc_get_pad_format(xdmsc, cfg, fmt->pad, fmt->which);
+ *__format = fmt->format;
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XDEMOSAIC_MIN_WIDTH, xdmsc->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XDEMOSAIC_MIN_HEIGHT, xdmsc->max_height);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ if (__format->code != MEDIA_BUS_FMT_RBG888_1X24 &&
+ __format->code != MEDIA_BUS_FMT_RBG101010_1X30 &&
+ __format->code != MEDIA_BUS_FMT_RBG121212_1X36 &&
+ __format->code != MEDIA_BUS_FMT_RBG161616_1X48) {
+ dev_dbg(xdmsc->xvip.dev,
+ "%s : Unsupported source media bus code format",
+ __func__);
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ }
+ }
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ if (!xdmsc_is_format_bayer(xdmsc, __format->code)) {
+ dev_dbg(xdmsc->xvip.dev,
+ "Unsupported Sink Pad Media format, defaulting to RGGB");
+ __format->code = MEDIA_BUS_FMT_SRGGB8_1X8;
+ }
+ }
+
+ fmt->format = *__format;
+ return 0;
+}
+
+static int xdmsc_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xdmsc->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xdmsc->default_formats[XVIP_PAD_SOURCE];
+ return 0;
+}
+
+static int xdmsc_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xdmsc_internal_ops = {
+ .open = xdmsc_open,
+ .close = xdmsc_close,
+};
+
+static const struct v4l2_subdev_pad_ops xdmsc_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xdmsc_get_format,
+ .set_fmt = xdmsc_set_format,
+};
+
+static const struct v4l2_subdev_ops xdmsc_ops = {
+ .video = &xdmsc_video_ops,
+ .pad = &xdmsc_pad_ops,
+};
+
+static const struct media_entity_operations xdmsc_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xdmsc_parse_of(struct xdmsc_dev *xdmsc)
+{
+ struct device *dev = xdmsc->xvip.dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id = 0;
+ int rval;
+
+ rval = of_property_read_u32(node, "xlnx,max-height",
+ &xdmsc->max_height);
+ if (rval < 0) {
+ dev_err(dev, "missing xlnx,max-height property!");
+ return -EINVAL;
+ } else if (xdmsc->max_height > XDEMOSAIC_MAX_HEIGHT ||
+ xdmsc->max_height < XDEMOSAIC_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width",
+ &xdmsc->max_width);
+ if (rval < 0) {
+ dev_err(dev, "missing xlnx,max-width property!");
+ return -EINVAL;
+ } else if (xdmsc->max_width > XDEMOSAIC_MAX_WIDTH ||
+ xdmsc->max_width < XDEMOSAIC_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+ /* Get the format description for each pad */
+	for_each_child_of_node(ports, port) {
+		if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+			rval = of_property_read_u32(port, "reg", &port_id);
+			if (rval < 0) {
+				dev_err(dev, "No reg in DT\n");
+				of_node_put(port);
+				return rval;
+			}
+
+			if (port_id != 0 && port_id != 1) {
+				dev_err(dev, "Invalid reg in DT\n");
+				of_node_put(port);
+				return -EINVAL;
+			}
+		}
+	}
+
+ xdmsc->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xdmsc->rst_gpio)) {
+ if (PTR_ERR(xdmsc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xdmsc->rst_gpio);
+ }
+ return 0;
+}
+
+static int xdmsc_probe(struct platform_device *pdev)
+{
+ struct xdmsc_dev *xdmsc;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval;
+
+ xdmsc = devm_kzalloc(&pdev->dev, sizeof(*xdmsc), GFP_KERNEL);
+ if (!xdmsc)
+ return -ENOMEM;
+ xdmsc->xvip.dev = &pdev->dev;
+ rval = xdmsc_parse_of(xdmsc);
+ if (rval < 0)
+ return rval;
+	rval = xvip_init_resources(&xdmsc->xvip);
+	if (rval < 0)
+		return rval;
+
+	/* Reset Demosaic IP */
+	gpiod_set_value_cansleep(xdmsc->rst_gpio,
+				 XDEMOSAIC_RESET_DEASSERT);
+
+ /* Init V4L2 subdev */
+ subdev = &xdmsc->xvip.subdev;
+ v4l2_subdev_init(subdev, &xdmsc_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xdmsc_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ def_fmt = &xdmsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ def_fmt->width = XDEMOSAIC_DEF_WIDTH;
+ def_fmt->height = XDEMOSAIC_DEF_HEIGHT;
+
+ /*
+ * Sink Pad can be any Bayer format.
+ * Default Sink Pad format is RGGB.
+ */
+ def_fmt->code = MEDIA_BUS_FMT_SRGGB8_1X8;
+ xdmsc->formats[XVIP_PAD_SINK] = *def_fmt;
+
+ def_fmt = &xdmsc->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xdmsc->default_formats[XVIP_PAD_SINK];
+
+ /* Source Pad has a fixed media bus format of RGB */
+ def_fmt->code = MEDIA_BUS_FMT_RBG888_1X24;
+ xdmsc->formats[XVIP_PAD_SOURCE] = *def_fmt;
+
+ xdmsc->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xdmsc->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xdmsc_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xdmsc->pads);
+ if (rval < 0)
+ goto media_error;
+
+ platform_set_drvdata(pdev, xdmsc);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto v4l2_subdev_error;
+ }
+ dev_info(&pdev->dev,
+ "Xilinx Video Demosaic Probe Successful");
+ return 0;
+
+v4l2_subdev_error:
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xdmsc->xvip);
+ return rval;
+}
+
+static int xdmsc_remove(struct platform_device *pdev)
+{
+ struct xdmsc_dev *xdmsc = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xdmsc->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xdmsc->xvip);
+ return 0;
+}
+
+static const struct of_device_id xdmsc_of_id_table[] = {
+ {.compatible = "xlnx,v-demosaic"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xdmsc_of_id_table);
+
+static struct platform_driver xdmsc_driver = {
+ .driver = {
+ .name = "xilinx-demosaic",
+ .of_match_table = xdmsc_of_id_table,
+ },
+ .probe = xdmsc_probe,
+ .remove = xdmsc_remove,
+};
+
+module_platform_driver(xdmsc_driver);
+MODULE_DESCRIPTION("Xilinx Demosaic IP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index c9d5fdb2d407..e8090721d25b 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -10,11 +10,13 @@
*/
#include <linux/dma/xilinx_dma.h>
+#include <linux/dma/xilinx_frmbuf.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/xilinx-v4l2-controls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
@@ -36,6 +38,11 @@
#define XVIP_DMA_MIN_HEIGHT 1U
#define XVIP_DMA_MAX_HEIGHT 8191U
+struct xventity_list {
+ struct list_head list;
+ struct media_entity *entity;
+};
+
/* -----------------------------------------------------------------------------
* Helper functions
*/
@@ -60,9 +67,10 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
struct v4l2_subdev_format fmt;
struct v4l2_subdev *subdev;
int ret;
+ int width, height;
subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
- if (subdev == NULL)
+ if (!subdev)
return -EPIPE;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -70,10 +78,18 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
if (ret < 0)
return ret == -ENOIOCTLCMD ? -EINVAL : ret;
- if (dma->fmtinfo->code != fmt.format.code ||
- dma->format.height != fmt.format.height ||
- dma->format.width != fmt.format.width ||
- dma->format.colorspace != fmt.format.colorspace)
+ if (dma->fmtinfo->code != fmt.format.code)
+ return -EINVAL;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ width = dma->format.fmt.pix_mp.width;
+ height = dma->format.fmt.pix_mp.height;
+ } else {
+ width = dma->format.fmt.pix.width;
+ height = dma->format.fmt.pix.height;
+ }
+
+ if (width != fmt.format.width || height != fmt.format.height)
return -EINVAL;
return 0;
@@ -83,44 +99,135 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
* Pipeline Stream Management
*/
+static int xvip_entity_start_stop(struct xvip_composite_device *xdev,
+ struct media_entity *entity, bool start)
+{
+ struct v4l2_subdev *subdev;
+ bool is_streaming;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "%s entity %s\n",
+ start ? "Starting" : "Stopping", entity->name);
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* Track this subdev's streaming state in the composite device's list */
+ is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
+
+ /*
+ * Start or stop the subdev only once, in case it is
+ * shared between sub-graphs.
+ */
+ if (start && !is_streaming) {
+ /* power-on subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_power on failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ return ret;
+ }
+
+ /* stream-on subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream on failed on subdev\n");
+ v4l2_subdev_call(subdev, core, s_power, 0);
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ }
+ } else if (!start && is_streaming) {
+ /* stream-off subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream off failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 1);
+ }
+
+ /* power-off subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ dev_err(xdev->dev,
+ "s_power off failed on subdev\n");
+ }
+
+ return ret;
+}
+
/**
* xvip_pipeline_start_stop - Start or stop streaming on a pipeline
- * @pipe: The pipeline
+ * @xdev: Composite video device
+ * @dma: xvip dma
* @start: Start (when true) or stop (when false) the pipeline
*
- * Walk the entities chain starting at the pipeline output video node and start
- * or stop all of them.
+ * Walk the entity chain starting at @dma and start or stop all of them.
*
* Return: 0 if successful, or the return value of the failed video::s_stream
* operation otherwise.
*/
-static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
+static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
+ struct xvip_dma *dma, bool start)
{
- struct xvip_dma *dma = pipe->output;
- struct media_entity *entity;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
- int ret;
+ struct media_graph graph;
+ struct media_entity *entity = &dma->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct xventity_list *temp, *_temp;
+ LIST_HEAD(ent_list);
+ int ret = 0;
+
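+ /* The media graph walk must run with the graph mutex held */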
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the subdev nodes */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret)
+ goto error;
- entity = &dma->video.entity;
- while (1) {
- pad = &entity->pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
+ media_graph_walk_start(&graph, entity);
- pad = media_entity_remote_pad(pad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- break;
+ /* get the list of entities */
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xventity_list *ele;
- entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
+ /* We want to stream on/off only subdevs */
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
- ret = v4l2_subdev_call(subdev, video, s_stream, start);
- if (start && ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
+ /* Maintain the pipeline sequence in a list */
+ ele = kzalloc(sizeof(*ele), GFP_KERNEL);
+ if (!ele) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ele->entity = entity;
+ list_add(&ele->list, &ent_list);
}
- return 0;
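+ /*
+ * list_add() prepends, so iterating ent_list forward visits
+ * the entities in the reverse of the graph walk order.
+ */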
+ if (start) {
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ /* Enable all subdevs from sink to source */
+ ret = xvip_entity_start_stop(xdev, temp->entity, start);
+ if (ret < 0) {
+ dev_err(xdev->dev, "ret = %d for entity %s\n",
+ ret, temp->entity->name);
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
+ /* Disable all subdevs from source to sink */
+ xvip_entity_start_stop(xdev, temp->entity, start);
+ }
+
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+
+error:
+ mutex_unlock(&mdev->graph_mutex);
+ media_graph_walk_cleanup(&graph);
+ return ret;
}
/**
@@ -133,7 +240,8 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
* independently, pipelines have a shared stream state that enable or disable
* all entities in the pipeline. For this reason the pipeline uses a streaming
* counter that tracks the number of DMA engines that have requested the stream
- * to be enabled.
+ * to be enabled. This will walk the graph starting from each DMA and enable or
+ * disable the entities in the path.
*
* When called with the @on argument set to true, this function will increment
* the pipeline streaming count. If the streaming count reaches the number of
@@ -150,20 +258,31 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
*/
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
+ struct xvip_composite_device *xdev;
+ struct xvip_dma *dma;
int ret = 0;
mutex_lock(&pipe->lock);
+ xdev = pipe->xdev;
if (on) {
if (pipe->stream_count == pipe->num_dmas - 1) {
- ret = xvip_pipeline_start_stop(pipe, true);
- if (ret < 0)
- goto done;
+ /*
+ * Iterate over the DMAs; the stream-on order of the
+ * subdevs may not be sequential when there are
+ * multiple sub-graph paths.
+ */
+ list_for_each_entry(dma, &xdev->dmas, list) {
+ ret = xvip_pipeline_start_stop(xdev, dma, true);
+ if (ret < 0)
+ goto done;
+ }
}
pipe->stream_count++;
} else {
if (--pipe->stream_count == 0)
- xvip_pipeline_start_stop(pipe, false);
+ list_for_each_entry(dma, &xdev->dmas, list)
+ xvip_pipeline_start_stop(xdev, dma, false);
}
done:
@@ -200,23 +319,22 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
dma = to_xvip_dma(media_entity_to_video_device(entity));
- if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
- pipe->output = dma;
+ if (dma->pad.flags & MEDIA_PAD_FL_SINK)
num_outputs++;
- } else {
+ else
num_inputs++;
- }
}
mutex_unlock(&mdev->graph_mutex);
media_graph_walk_cleanup(&graph);
- /* We need exactly one output and zero or one input. */
- if (num_outputs != 1 || num_inputs > 1)
+ /* We need at least one DMA to proceed */
+ if (num_outputs == 0 && num_inputs == 0)
return -EPIPE;
pipe->num_dmas = num_inputs + num_outputs;
+ pipe->xdev = start->xdev;
return 0;
}
@@ -224,7 +342,6 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
pipe->num_dmas = 0;
- pipe->output = NULL;
}
/**
@@ -287,11 +404,13 @@ done:
* @buf: vb2 buffer base object
* @queue: buffer list entry in the DMA engine queued buffers list
* @dma: DMA channel that uses the buffer
+ * @desc: DMA descriptor associated with this buffer
*/
struct xvip_dma_buffer {
struct vb2_v4l2_buffer buf;
struct list_head queue;
struct xvip_dma *dma;
+ struct dma_async_tx_descriptor *desc;
};
#define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
@@ -300,6 +419,9 @@ static void xvip_dma_complete(void *param)
{
struct xvip_dma_buffer *buf = param;
struct xvip_dma *dma = buf->dma;
+ int i, sizeimage;
+ u32 fid;
+ int status;
spin_lock(&dma->queued_lock);
list_del(&buf->queue);
@@ -308,7 +430,38 @@ static void xvip_dma_complete(void *param)
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = dma->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
- vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+
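+ /* Retrieve the field ID the DMA engine reported for this descriptor */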
+ status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
+ if (!status) {
+ if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
+ dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
+ dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
+ /*
+ * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
+ * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
+ */
+ buf->buf.field = fid ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+
+ if (fid == dma->prev_fid)
+ buf->buf.sequence = dma->sequence++;
+
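+ /* Two fields make up a frame, so report a frame-level sequence */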
+ buf->buf.sequence >>= 1;
+ dma->prev_fid = fid;
+ }
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
+ }
+ } else {
+ sizeimage = dma->format.fmt.pix.sizeimage;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
+ }
+
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
@@ -318,13 +471,39 @@ xvip_dma_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ u8 i;
+ int sizeimage;
+
+ /* Multi planar case: Make sure the image size is large enough */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ if (*nplanes) {
+ if (*nplanes != dma->format.fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ if (sizes[i] < sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = dma->fmtinfo->buffers;
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ sizes[i] = sizeimage;
+ }
+ }
+ return 0;
+ }
- /* Make sure the image size is large enough. */
- if (*nplanes)
- return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
+ /* Single planar case: Make sure the image size is large enough */
+ sizeimage = dma->format.fmt.pix.sizeimage;
+ if (*nplanes == 1)
+ return sizes[0] < sizeimage ? -EINVAL : 0;
*nplanes = 1;
- sizes[0] = dma->format.sizeimage;
+ sizes[0] = sizeimage;
return 0;
}
@@ -348,14 +527,19 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
struct dma_async_tx_descriptor *desc;
dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
u32 flags;
+ u32 luma_size;
+ u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
+ u32 fid = ~0;
- if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
dma->xt.dir = DMA_DEV_TO_MEM;
dma->xt.src_sgl = false;
dma->xt.dst_sgl = true;
dma->xt.dst_start = addr;
- } else {
+ } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
dma->xt.dir = DMA_MEM_TO_DEV;
dma->xt.src_sgl = true;
@@ -363,10 +547,66 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
dma->xt.src_start = addr;
}
- dma->xt.frame_size = 1;
- dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
- dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
- dma->xt.numf = dma->format.height;
+ /*
+ * The DMA IP supports only 2 planes, so one data chunk is
+ * sufficient to get the start address of the 2nd plane.
+ */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ pix_mp = &dma->format.fmt.pix_mp;
+ xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat,
+ &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
+ &bpl_deno);
+ dma->xt.frame_size = dma->fmtinfo->num_planes;
+ dma->sgl[0].size = (pix_mp->width * dma->fmtinfo->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ dma->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline -
+ dma->sgl[0].size;
+ dma->xt.numf = pix_mp->height;
+
+ /*
+ * dst_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
+ */
+
+ /* Handling contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 1) {
+ dma->sgl[0].dst_icg = 0;
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 2) {
+ dma_addr_t chroma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ luma_size = pix_mp->plane_fmt[0].bytesperline *
+ dma->xt.numf;
+ if (chroma_addr > addr)
+ dma->sgl[0].dst_icg = chroma_addr -
+ addr - luma_size;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &dma->format.fmt.pix;
+ xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
+ xvip_width_padding_factor(pix->pixelformat,
+ &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
+ &bpl_deno);
+ dma->xt.frame_size = dma->fmtinfo->num_planes;
+ dma->sgl[0].size = (pix->width * dma->fmtinfo->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ dma->sgl[0].icg = pix->bytesperline - dma->sgl[0].size;
+ dma->xt.numf = pix->height;
+ dma->sgl[0].dst_icg = 0;
+ }
desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
if (!desc) {
@@ -376,6 +616,28 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
}
desc->callback = xvip_dma_complete;
desc->callback_param = buf;
+ buf->desc = desc;
+
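+ /*
+ * Translate the V4L2 field to the DMA field ID: the top field
+ * is 1, the bottom field and progressive frames are 0.
+ */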
+ if (buf->buf.field == V4L2_FIELD_TOP)
+ fid = 1;
+ else if (buf->buf.field == V4L2_FIELD_BOTTOM)
+ fid = 0;
+ else if (buf->buf.field == V4L2_FIELD_NONE)
+ fid = 0;
+
+ xilinx_xdma_set_fid(dma->dma, desc, fid);
+
+ /* Set low latency capture mode */
+ if (dma->earlycb_mode) {
+ int ret;
+
+ ret = xilinx_xdma_set_earlycb(dma->dma, desc,
+ dma->earlycb_mode);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev,
+ "Failed enable low latency mode\n");
+ }
+ }
spin_lock_irq(&dma->queued_lock);
list_add_tail(&buf->queue, &dma->queued_bufs);
@@ -395,6 +657,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
int ret;
dma->sequence = 0;
+ dma->prev_fid = ~0;
/*
* Start streaming on the pipeline. No link touching an entity in the
@@ -403,10 +666,12 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
* Use the pipeline object embedded in the first DMA object that starts
* streaming.
*/
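+ /* Serialize pipeline start against the other DMA nodes of this device */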
+ mutex_lock(&dma->xdev->lock);
pipe = dma->video.entity.pipe
? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ mutex_unlock(&dma->xdev->lock);
if (ret < 0)
goto error;
@@ -427,7 +692,9 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
dma_async_issue_pending(dma->dma);
/* Start the pipeline. */
- xvip_pipeline_set_stream(pipe, true);
+ ret = xvip_pipeline_set_stream(pipe, true);
+ if (ret < 0)
+ goto error_stop;
return 0;
@@ -435,6 +702,7 @@ error_stop:
media_pipeline_stop(&dma->video.entity);
error:
+ dmaengine_terminate_all(dma->dma);
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
@@ -493,10 +761,20 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
cap->device_caps = V4L2_CAP_STREAMING;
- if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ switch (dma->queue.type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- else
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ break;
+ }
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS
| dma->xdev->v4l2_caps;
@@ -509,6 +787,61 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
return 0;
}
+static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
+ struct v4l2_subdev_format *v4l_fmt)
+{
+ const struct xvip_video_format *fmt;
+ int ret;
+ u32 i, fmt_cnt, *fmts;
+
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
+ if (ret)
+ return ret;
+
+ /* Has media pad value changed? */
+ if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
+ !dma->remote_subdev_med_bus) {
+ /* Re-generate legal list of fourcc codes */
+ dma->poss_v4l2_fmt_cnt = 0;
+ dma->remote_subdev_med_bus = v4l_fmt->format.code;
+
+ if (!dma->poss_v4l2_fmts) {
+ dma->poss_v4l2_fmts =
+ devm_kzalloc(&dma->video.dev,
+ sizeof(u32) * fmt_cnt,
+ GFP_KERNEL);
+ if (!dma->poss_v4l2_fmts)
+ return -ENOMEM;
+ }
+
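+ /* Keep only the fourccs whose media bus code matches the subdev */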
+ for (i = 0; i < fmt_cnt; i++) {
+ fmt = xvip_get_format_by_fourcc(fmts[i]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ if (fmt->code != dma->remote_subdev_med_bus)
+ continue;
+
+ dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
+ }
+ }
+
+ /* Return an error if the index exceeds the count of legal values */
+ if (f->index >= dma->poss_v4l2_fmt_cnt)
+ return -EINVAL;
+
+ /* Otherwise return the pix format at the requested index */
+ fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
+ strlcpy(f->description, fmt->description,
+ sizeof(f->description));
+
+ return 0;
+}
+
/* FIXME: without this callback function, some applications are not configured
* with correct formats, and it results in frames in wrong format. Whether this
* callback needs to be required is not clearly defined, so it should be
@@ -519,12 +852,49 @@ xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format v4l_fmt;
+ const struct xvip_video_format *fmt;
+ int err, ret;
+
+ /* Establish media pad format */
+ subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ /*
+ * In case of frmbuf DMA, this invokes frmbuf driver specific APIs
+ * to enumerate formats; otherwise it returns the pix format
+ * corresponding to the subdev's media bus format. This separation
+ * will be helpful for cleanup and upstreaming.
+ */
+ err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
+ if (!err)
+ return err;
+ /*
+ * This path returns a single pix format based on the subdev's
+ * media bus format.
+ */
if (f->index > 0)
return -EINVAL;
- f->pixelformat = dma->format.pixelformat;
- strscpy(f->description, dma->fmtinfo->description,
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ f->pixelformat = dma->format.fmt.pix_mp.pixelformat;
+ else
+ f->pixelformat = dma->format.fmt.pix.pixelformat;
+
+ fmt = xvip_get_format_by_code(v4l_fmt.format.code);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
+ strlcpy(f->description, fmt->description,
sizeof(f->description));
return 0;
@@ -536,13 +906,17 @@ xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- format->fmt.pix = dma->format;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ format->fmt.pix_mp = dma->format.fmt.pix_mp;
+ else
+ format->fmt.pix = dma->format.fmt.pix;
return 0;
}
static void
-__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
+__xvip_dma_try_format(struct xvip_dma *dma,
+ struct v4l2_format *format,
const struct xvip_video_format **fmtinfo)
{
const struct xvip_video_format *info;
@@ -553,40 +927,144 @@ __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
unsigned int width;
unsigned int align;
unsigned int bpl;
+ unsigned int i, hsub, vsub, plane_width, plane_height;
+ unsigned int fourcc;
+ unsigned int padding_factor_nume, padding_factor_deno;
+ unsigned int bpl_nume, bpl_deno;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
+ if (!subdev)
+ return;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return;
+
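+ /* Propagate the field setting of the connected subdev */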
+ if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
+ else
+ dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
+ } else {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ else
+ dma->format.fmt.pix.field = V4L2_FIELD_NONE;
+ }
/* Retrieve format information and select the default format if the
* requested format isn't supported.
*/
- info = xvip_get_format_by_fourcc(pix->pixelformat);
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ fourcc = format->fmt.pix_mp.pixelformat;
+ else
+ fourcc = format->fmt.pix.pixelformat;
+
+ info = xvip_get_format_by_fourcc(fourcc);
+
if (IS_ERR(info))
info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
- pix->pixelformat = info->fourcc;
- pix->field = V4L2_FIELD_NONE;
+ xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
/* The transfer alignment requirements are expressed in bytes. Compute
* the minimum and maximum values, clamp the requested width and convert
* it back to pixels.
*/
- align = lcm(dma->align, info->bpp);
+ align = lcm(dma->align, info->bpp >> 3);
min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
- width = rounddown(pix->width * info->bpp, align);
-
- pix->width = clamp(width, min_width, max_width) / info->bpp;
- pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
- XVIP_DMA_MAX_HEIGHT);
- /* Clamp the requested bytes per line value. If the maximum bytes per
- * line value is zero, the module doesn't support user configurable line
- * sizes. Override the requested value with the minimum in that case.
- */
- min_bpl = pix->width * info->bpp;
- max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
- bpl = rounddown(pix->bytesperline, dma->align);
-
- pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
- pix->sizeimage = pix->bytesperline * pix->height;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ pix_mp = &format->fmt.pix_mp;
+ plane_fmt = pix_mp->plane_fmt;
+ pix_mp->field = dma->format.fmt.pix_mp.field;
+ width = rounddown(pix_mp->width * info->bpl_factor, align);
+ pix_mp->width = clamp(width, min_width, max_width) /
+ info->bpl_factor;
+ pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ /*
+ * Clamp the requested bytes per line value. If the maximum
+ * bytes per line value is zero, the module doesn't support
+ * user configurable line sizes. Override the requested value
+ * with the minimum in that case.
+ */
+
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+
+ /* Handling contiguous data with mplanes */
+ if (info->buffers == 1) {
+ min_bpl = (pix_mp->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ bpl = roundup(plane_fmt[0].bytesperline, dma->align);
+ plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
+ max_bpl);
+
+ if (info->num_planes == 1) {
+ /* Single plane formats */
+ plane_fmt[0].sizeimage =
+ plane_fmt[0].bytesperline *
+ pix_mp->height;
+ } else {
+ /* Multi plane formats */
+ plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(plane_fmt[0].bytesperline *
+ pix_mp->height *
+ info->bpp, 8);
+ }
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ hsub = info->hsub;
+ vsub = info->vsub;
+ for (i = 0; i < info->num_planes; i++) {
+ plane_width = pix_mp->width / (i ? hsub : 1);
+ plane_height = pix_mp->height / (i ? vsub : 1);
+ min_bpl = (plane_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ bpl = rounddown(plane_fmt[i].bytesperline,
+ dma->align);
+ plane_fmt[i].bytesperline =
+ clamp(bpl, min_bpl, max_bpl);
+ plane_fmt[i].sizeimage =
+ plane_fmt[i].bytesperline *
+ plane_height;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &format->fmt.pix;
+ pix->field = dma->format.fmt.pix.field;
+ width = rounddown(pix->width * info->bpl_factor, align);
+ pix->width = clamp(width, min_width, max_width) /
+ info->bpl_factor;
+ pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ min_bpl = (pix->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+ bpl = rounddown(pix->bytesperline, dma->align);
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->width * pix->height * info->bpp / 8;
+ }
if (fmtinfo)
*fmtinfo = info;
@@ -598,7 +1076,7 @@ xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
+ __xvip_dma_try_format(dma, format, NULL);
return 0;
}
@@ -609,26 +1087,59 @@ xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
const struct xvip_video_format *info;
- __xvip_dma_try_format(dma, &format->fmt.pix, &info);
+ __xvip_dma_try_format(dma, format, &info);
if (vb2_is_busy(&dma->queue))
return -EBUSY;
- dma->format = format->fmt.pix;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp = format->fmt.pix_mp;
+ else
+ dma->format.fmt.pix = format->fmt.pix;
+
dma->fmtinfo = info;
return 0;
}
+static int
+xvip_dma_set_ctrl(struct file *file, void *fh, struct v4l2_control *ctl)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
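+ /* Only the Xilinx low-latency control is handled; others are ignored */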
+ if (ctl->id == V4L2_CID_XILINX_LOW_LATENCY) {
+ if (ctl->value)
+ dma->earlycb_mode = EARLY_CALLBACK_LOW_LATENCY;
+ else
+ dma->earlycb_mode = 0;
+ }
+
+ return 0;
+}
+
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
.vidioc_querycap = xvip_dma_querycap,
.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
+ .vidioc_enum_fmt_vid_cap_mplane = xvip_dma_enum_format,
+ .vidioc_enum_fmt_vid_out = xvip_dma_enum_format,
+ .vidioc_enum_fmt_vid_out_mplane = xvip_dma_enum_format,
.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_cap_mplane = xvip_dma_get_format,
.vidioc_g_fmt_vid_out = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_out_mplane = xvip_dma_get_format,
.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_cap_mplane = xvip_dma_set_format,
.vidioc_s_fmt_vid_out = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_out_mplane = xvip_dma_set_format,
+ .vidioc_s_ctrl = xvip_dma_set_ctrl,
.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_cap_mplane = xvip_dma_try_format,
.vidioc_try_fmt_vid_out = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_out_mplane = xvip_dma_try_format,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
@@ -661,6 +1172,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
{
char name[16];
int ret;
+ u32 i, hsub, vsub, width, height;
dma->xdev = xdev;
dma->port = port;
@@ -670,17 +1182,56 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
spin_lock_init(&dma->queued_lock);
dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
- dma->format.pixelformat = dma->fmtinfo->fourcc;
- dma->format.colorspace = V4L2_COLORSPACE_SRGB;
- dma->format.field = V4L2_FIELD_NONE;
- dma->format.width = XVIP_DMA_DEF_WIDTH;
- dma->format.height = XVIP_DMA_DEF_HEIGHT;
- dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
- dma->format.sizeimage = dma->format.bytesperline * dma->format.height;
+ dma->format.type = type;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ pix_mp = &dma->format.fmt.pix_mp;
+ pix_mp->pixelformat = dma->fmtinfo->fourcc;
+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_DMA_DEF_WIDTH;
+ pix_mp->height = XVIP_DMA_DEF_HEIGHT;
+
+ /* Handling contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 1) {
+ pix_mp->plane_fmt[0].bytesperline =
+ pix_mp->width * dma->fmtinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ pix_mp->width * pix_mp->height *
+ dma->fmtinfo->bpp / 8;
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ hsub = dma->fmtinfo->hsub;
+ vsub = dma->fmtinfo->vsub;
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ width = pix_mp->width / (i ? hsub : 1);
+ height = pix_mp->height / (i ? vsub : 1);
+ pix_mp->plane_fmt[i].bytesperline =
+ width * dma->fmtinfo->bpl_factor;
+ pix_mp->plane_fmt[i].sizeimage = width * height;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &dma->format.fmt.pix;
+ pix->pixelformat = dma->fmtinfo->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+ pix->width = XVIP_DMA_DEF_WIDTH;
+ pix->height = XVIP_DMA_DEF_HEIGHT;
+ pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
+ pix->sizeimage =
+ pix->width * pix->height * dma->fmtinfo->bpp / 8;
+ }
/* Initialize the media entity... */
- dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ dma->pad.flags = MEDIA_PAD_FL_SINK;
+ else
+ dma->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
if (ret < 0)
@@ -692,11 +1243,18 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
dma->video.queue = &dma->queue;
snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
xdev->dev->of_node,
- type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
+ (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ? "output" : "input",
port);
+
dma->video.vfl_type = VFL_TYPE_GRABBER;
- dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? VFL_DIR_RX : VFL_DIR_TX;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ dma->video.vfl_dir = VFL_DIR_RX;
+ else
+ dma->video.vfl_dir = VFL_DIR_TX;
+
dma->video.release = video_device_release_empty;
dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
dma->video.lock = &dma->lock;
@@ -729,10 +1287,12 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
/* ... and the DMA channel. */
snprintf(name, sizeof(name), "port%u", port);
- dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
- if (dma->dma == NULL) {
- dev_err(dma->xdev->dev, "no VDMA channel found\n");
- ret = -ENODEV;
+ dma->dma = dma_request_chan(dma->xdev->dev, name);
+ if (IS_ERR(dma->dma)) {
+ ret = PTR_ERR(dma->dma);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dma->xdev->dev,
+ "No Video DMA channel found");
goto error;
}
@@ -756,7 +1316,7 @@ void xvip_dma_cleanup(struct xvip_dma *dma)
if (video_is_registered(&dma->video))
video_unregister_device(&dma->video);
- if (dma->dma)
+ if (!IS_ERR(dma->dma))
dma_release_channel(dma->dma);
media_entity_cleanup(&dma->video.entity);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 5aec4d17eb21..61c26ab103f7 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -32,7 +32,7 @@ struct xvip_video_format;
* @use_count: number of DMA engines using the pipeline
* @stream_count: number of DMA engines currently streaming
* @num_dmas: number of DMA engines in the pipeline
- * @output: DMA engine at the output of the pipeline
+ * @xdev: Composite device the pipe belongs to
*/
struct xvip_pipeline {
struct media_pipeline pipe;
@@ -42,7 +42,7 @@ struct xvip_pipeline {
unsigned int stream_count;
unsigned int num_dmas;
- struct xvip_dma *output;
+ struct xvip_composite_device *xdev;
};
static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
@@ -55,12 +55,15 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @list: list entry in a composite device dmas list
* @video: V4L2 video device associated with the DMA channel
* @pad: media pad for the video device entity
+ * @remote_subdev_med_bus: media bus format of sub-device
* @xdev: composite device the DMA channel belongs to
* @pipe: pipeline belonging to the DMA channel
* @port: composite device DT node port number for the DMA channel
* @lock: protects the @format, @fmtinfo and @queue fields
* @format: active V4L2 pixel format
* @fmtinfo: format information corresponding to the active @format
+ * @poss_v4l2_fmts: all possible V4L2 formats supported
+ * @poss_v4l2_fmt_cnt: number of supported V4L2 formats
* @queue: vb2 buffers queue
* @sequence: V4L2 buffers sequence number
* @queued_bufs: list of queued buffers
@@ -69,19 +72,23 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @align: transfer alignment required by the DMA channel (in bytes)
* @xt: dma interleaved template for dma configuration
* @sgl: data chunk structure for dma_interleaved_template
+ * @prev_fid: Previous Field ID
*/
struct xvip_dma {
struct list_head list;
struct video_device video;
struct media_pad pad;
+ u32 remote_subdev_med_bus;
struct xvip_composite_device *xdev;
struct xvip_pipeline pipe;
unsigned int port;
struct mutex lock;
- struct v4l2_pix_format format;
+ struct v4l2_format format;
const struct xvip_video_format *fmtinfo;
+ u32 *poss_v4l2_fmts;
+ u32 poss_v4l2_fmt_cnt;
struct vb2_queue queue;
unsigned int sequence;
@@ -93,6 +100,9 @@ struct xvip_dma {
unsigned int align;
struct dma_interleaved_template xt;
struct data_chunk sgl[1];
+
+ u32 prev_fid;
+ u32 earlycb_mode;
};
#define to_xvip_dma(vdev) container_of(vdev, struct xvip_dma, video)
diff --git a/drivers/media/platform/xilinx/xilinx-gamma-coeff.h b/drivers/media/platform/xilinx/xilinx-gamma-coeff.h
new file mode 100644
index 000000000000..344260008a47
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-gamma-coeff.h
@@ -0,0 +1,5385 @@
+/*
+ * Xilinx Gamma Correction IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_GAMMA_COEFF_H__
+#define __XILINX_GAMMA_COEFF_H__
+
+#define GAMMA_CURVE_LENGTH (40)
+
+#define GAMMA_BPC_8 (8)
+#define GAMMA8_TABLE_LENGTH BIT(GAMMA_BPC_8)
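+
+/*
+ * Each table below maps an 8-bit input x to approximately
+ * round(255 * (x / 255)^gamma): xgamma8_01 is gamma = 0.1,
+ * xgamma8_02 is gamma = 0.2, and so on, making xgamma8_10 the
+ * identity curve. GAMMA_CURVE_LENGTH suggests 40 such curves,
+ * i.e. gamma 0.1 through 4.0 in steps of 0.1 (inferred from the
+ * table values, not stated in the patch).
+ */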
+static const u16 xgamma8_01[GAMMA8_TABLE_LENGTH] = {
+ 0, 147, 157, 164, 168, 172, 175, 178,
+ 180, 183, 184, 186, 188, 189, 191, 192,
+ 193, 195, 196, 197, 198, 199, 200, 200,
+ 201, 202, 203, 204, 204, 205, 206, 207,
+ 207, 208, 208, 209, 210, 210, 211, 211,
+ 212, 212, 213, 213, 214, 214, 215, 215,
+ 216, 216, 217, 217, 218, 218, 218, 219,
+ 219, 220, 220, 220, 221, 221, 221, 222,
+ 222, 222, 223, 223, 223, 224, 224, 224,
+ 225, 225, 225, 226, 226, 226, 227, 227,
+ 227, 227, 228, 228, 228, 228, 229, 229,
+ 229, 230, 230, 230, 230, 231, 231, 231,
+ 231, 232, 232, 232, 232, 232, 233, 233,
+ 233, 233, 234, 234, 234, 234, 234, 235,
+ 235, 235, 235, 235, 236, 236, 236, 236,
+ 236, 237, 237, 237, 237, 237, 238, 238,
+ 238, 238, 238, 239, 239, 239, 239, 239,
+ 239, 240, 240, 240, 240, 240, 240, 241,
+ 241, 241, 241, 241, 241, 242, 242, 242,
+ 242, 242, 242, 243, 243, 243, 243, 243,
+ 243, 244, 244, 244, 244, 244, 244, 244,
+ 245, 245, 245, 245, 245, 245, 245, 246,
+ 246, 246, 246, 246, 246, 246, 247, 247,
+ 247, 247, 247, 247, 247, 247, 248, 248,
+ 248, 248, 248, 248, 248, 249, 249, 249,
+ 249, 249, 249, 249, 249, 249, 250, 250,
+ 250, 250, 250, 250, 250, 250, 251, 251,
+ 251, 251, 251, 251, 251, 251, 251, 252,
+ 252, 252, 252, 252, 252, 252, 252, 252,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 253, 254, 254, 254, 254, 254, 254, 254,
+ 254, 254, 254, 255, 255, 255, 255, 255,
+};
+
+static const u16 xgamma8_02[GAMMA8_TABLE_LENGTH] = {
+ 0, 84, 97, 105, 111, 116, 120, 124,
+ 128, 131, 133, 136, 138, 141, 143, 145,
+ 147, 148, 150, 152, 153, 155, 156, 158,
+ 159, 160, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 179, 180, 181, 182,
+ 183, 183, 184, 185, 186, 186, 187, 188,
+ 188, 189, 190, 190, 191, 192, 192, 193,
+ 193, 194, 195, 195, 196, 196, 197, 197,
+ 198, 199, 199, 200, 200, 201, 201, 202,
+ 202, 203, 203, 204, 204, 205, 205, 206,
+ 206, 207, 207, 208, 208, 208, 209, 209,
+ 210, 210, 211, 211, 211, 212, 212, 213,
+ 213, 214, 214, 214, 215, 215, 216, 216,
+ 216, 217, 217, 217, 218, 218, 219, 219,
+ 219, 220, 220, 220, 221, 221, 221, 222,
+ 222, 223, 223, 223, 224, 224, 224, 225,
+ 225, 225, 226, 226, 226, 227, 227, 227,
+ 227, 228, 228, 228, 229, 229, 229, 230,
+ 230, 230, 231, 231, 231, 231, 232, 232,
+ 232, 233, 233, 233, 233, 234, 234, 234,
+ 235, 235, 235, 235, 236, 236, 236, 237,
+ 237, 237, 237, 238, 238, 238, 238, 239,
+ 239, 239, 239, 240, 240, 240, 240, 241,
+ 241, 241, 241, 242, 242, 242, 242, 243,
+ 243, 243, 243, 244, 244, 244, 244, 245,
+ 245, 245, 245, 246, 246, 246, 246, 246,
+ 247, 247, 247, 247, 248, 248, 248, 248,
+ 248, 249, 249, 249, 249, 250, 250, 250,
+ 250, 250, 251, 251, 251, 251, 252, 252,
+ 252, 252, 252, 253, 253, 253, 253, 253,
+ 254, 254, 254, 254, 254, 255, 255, 255,
+};
+
+static const u16 xgamma8_03[GAMMA8_TABLE_LENGTH] = {
+ 0, 48, 60, 67, 73, 78, 83, 87,
+ 90, 94, 97, 99, 102, 104, 107, 109,
+ 111, 113, 115, 117, 119, 121, 122, 124,
+ 125, 127, 129, 130, 131, 133, 134, 136,
+ 137, 138, 139, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 151, 152, 153, 154,
+ 155, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168,
+ 168, 169, 170, 171, 172, 172, 173, 174,
+ 174, 175, 176, 177, 177, 178, 179, 179,
+ 180, 181, 181, 182, 183, 183, 184, 185,
+ 185, 186, 187, 187, 188, 188, 189, 190,
+ 190, 191, 191, 192, 193, 193, 194, 194,
+ 195, 195, 196, 197, 197, 198, 198, 199,
+ 199, 200, 200, 201, 201, 202, 202, 203,
+ 203, 204, 204, 205, 205, 206, 206, 207,
+ 207, 208, 208, 209, 209, 210, 210, 211,
+ 211, 212, 212, 213, 213, 213, 214, 214,
+ 215, 215, 216, 216, 217, 217, 217, 218,
+ 218, 219, 219, 220, 220, 220, 221, 221,
+ 222, 222, 223, 223, 223, 224, 224, 225,
+ 225, 225, 226, 226, 227, 227, 227, 228,
+ 228, 229, 229, 229, 230, 230, 230, 231,
+ 231, 232, 232, 232, 233, 233, 233, 234,
+ 234, 235, 235, 235, 236, 236, 236, 237,
+ 237, 237, 238, 238, 238, 239, 239, 240,
+ 240, 240, 241, 241, 241, 242, 242, 242,
+ 243, 243, 243, 244, 244, 244, 245, 245,
+ 245, 246, 246, 246, 247, 247, 247, 248,
+ 248, 248, 249, 249, 249, 249, 250, 250,
+ 250, 251, 251, 251, 252, 252, 252, 253,
+ 253, 253, 253, 254, 254, 254, 255, 255,
+};
+
+static const u16 xgamma8_04[GAMMA8_TABLE_LENGTH] = {
+ 0, 28, 37, 43, 48, 53, 57, 61,
+ 64, 67, 70, 73, 75, 78, 80, 82,
+ 84, 86, 88, 90, 92, 94, 96, 97,
+ 99, 101, 102, 104, 105, 107, 108, 110,
+ 111, 113, 114, 115, 117, 118, 119, 120,
+ 122, 123, 124, 125, 126, 127, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 149, 150, 151, 152, 153,
+ 154, 155, 155, 156, 157, 158, 159, 160,
+ 160, 161, 162, 163, 164, 164, 165, 166,
+ 167, 167, 168, 169, 170, 170, 171, 172,
+ 173, 173, 174, 175, 175, 176, 177, 177,
+ 178, 179, 179, 180, 181, 182, 182, 183,
+ 183, 184, 185, 185, 186, 187, 187, 188,
+ 189, 189, 190, 190, 191, 192, 192, 193,
+ 194, 194, 195, 195, 196, 197, 197, 198,
+ 198, 199, 199, 200, 201, 201, 202, 202,
+ 203, 203, 204, 205, 205, 206, 206, 207,
+ 207, 208, 208, 209, 209, 210, 211, 211,
+ 212, 212, 213, 213, 214, 214, 215, 215,
+ 216, 216, 217, 217, 218, 218, 219, 219,
+ 220, 220, 221, 221, 222, 222, 223, 223,
+ 224, 224, 225, 225, 226, 226, 227, 227,
+ 228, 228, 229, 229, 230, 230, 230, 231,
+ 231, 232, 232, 233, 233, 234, 234, 235,
+ 235, 235, 236, 236, 237, 237, 238, 238,
+ 239, 239, 240, 240, 240, 241, 241, 242,
+ 242, 243, 243, 243, 244, 244, 245, 245,
+ 246, 246, 246, 247, 247, 248, 248, 248,
+ 249, 249, 250, 250, 251, 251, 251, 252,
+ 252, 253, 253, 253, 254, 254, 255, 255,
+};
+
+static const u16 xgamma8_05[GAMMA8_TABLE_LENGTH] = {
+ 0, 16, 23, 28, 32, 36, 39, 42,
+ 45, 48, 50, 53, 55, 58, 60, 62,
+ 64, 66, 68, 70, 71, 73, 75, 77,
+ 78, 80, 81, 83, 84, 86, 87, 89,
+ 90, 92, 93, 94, 96, 97, 98, 100,
+ 101, 102, 103, 105, 106, 107, 108, 109,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 145, 146, 147, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 156,
+ 156, 157, 158, 159, 160, 160, 161, 162,
+ 163, 164, 164, 165, 166, 167, 167, 168,
+ 169, 170, 170, 171, 172, 173, 173, 174,
+ 175, 176, 176, 177, 178, 179, 179, 180,
+ 181, 181, 182, 183, 183, 184, 185, 186,
+ 186, 187, 188, 188, 189, 190, 190, 191,
+ 192, 192, 193, 194, 194, 195, 196, 196,
+ 197, 198, 198, 199, 199, 200, 201, 201,
+ 202, 203, 203, 204, 204, 205, 206, 206,
+ 207, 208, 208, 209, 209, 210, 211, 211,
+ 212, 212, 213, 214, 214, 215, 215, 216,
+ 217, 217, 218, 218, 219, 220, 220, 221,
+ 221, 222, 222, 223, 224, 224, 225, 225,
+ 226, 226, 227, 228, 228, 229, 229, 230,
+ 230, 231, 231, 232, 233, 233, 234, 234,
+ 235, 235, 236, 236, 237, 237, 238, 238,
+ 239, 240, 240, 241, 241, 242, 242, 243,
+ 243, 244, 244, 245, 245, 246, 246, 247,
+ 247, 248, 248, 249, 249, 250, 250, 251,
+ 251, 252, 252, 253, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_06[GAMMA8_TABLE_LENGTH] = {
+ 0, 9, 14, 18, 21, 24, 27, 29,
+ 32, 34, 37, 39, 41, 43, 45, 47,
+ 48, 50, 52, 54, 55, 57, 59, 60,
+ 62, 63, 65, 66, 68, 69, 71, 72,
+ 73, 75, 76, 77, 79, 80, 81, 83,
+ 84, 85, 86, 88, 89, 90, 91, 92,
+ 94, 95, 96, 97, 98, 99, 100, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 145, 146, 147, 148,
+ 149, 150, 151, 151, 152, 153, 154, 155,
+ 156, 156, 157, 158, 159, 160, 161, 161,
+ 162, 163, 164, 165, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 173, 174,
+ 175, 176, 176, 177, 178, 179, 179, 180,
+ 181, 182, 182, 183, 184, 185, 185, 186,
+ 187, 188, 188, 189, 190, 191, 191, 192,
+ 193, 194, 194, 195, 196, 196, 197, 198,
+ 199, 199, 200, 201, 201, 202, 203, 203,
+ 204, 205, 206, 206, 207, 208, 208, 209,
+ 210, 210, 211, 212, 212, 213, 214, 214,
+ 215, 216, 216, 217, 218, 218, 219, 220,
+ 220, 221, 222, 222, 223, 224, 224, 225,
+ 226, 226, 227, 228, 228, 229, 230, 230,
+ 231, 231, 232, 233, 233, 234, 235, 235,
+ 236, 237, 237, 238, 238, 239, 240, 240,
+ 241, 242, 242, 243, 243, 244, 245, 245,
+ 246, 247, 247, 248, 248, 249, 250, 250,
+ 251, 251, 252, 253, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_07[GAMMA8_TABLE_LENGTH] = {
+ 0, 5, 9, 11, 14, 16, 18, 21,
+ 23, 25, 26, 28, 30, 32, 33, 35,
+ 37, 38, 40, 41, 43, 44, 46, 47,
+ 49, 50, 52, 53, 54, 56, 57, 58,
+ 60, 61, 62, 64, 65, 66, 67, 69,
+ 70, 71, 72, 73, 75, 76, 77, 78,
+ 79, 80, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150,
+ 150, 151, 152, 153, 154, 155, 156, 157,
+ 157, 158, 159, 160, 161, 162, 163, 163,
+ 164, 165, 166, 167, 168, 168, 169, 170,
+ 171, 172, 173, 173, 174, 175, 176, 177,
+ 178, 178, 179, 180, 181, 182, 182, 183,
+ 184, 185, 186, 186, 187, 188, 189, 190,
+ 190, 191, 192, 193, 194, 194, 195, 196,
+ 197, 197, 198, 199, 200, 201, 201, 202,
+ 203, 204, 204, 205, 206, 207, 208, 208,
+ 209, 210, 211, 211, 212, 213, 214, 214,
+ 215, 216, 217, 217, 218, 219, 220, 220,
+ 221, 222, 223, 223, 224, 225, 226, 226,
+ 227, 228, 228, 229, 230, 231, 231, 232,
+ 233, 234, 234, 235, 236, 237, 237, 238,
+ 239, 239, 240, 241, 242, 242, 243, 244,
+ 244, 245, 246, 247, 247, 248, 249, 249,
+ 250, 251, 251, 252, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_08[GAMMA8_TABLE_LENGTH] = {
+ 0, 3, 5, 7, 9, 11, 13, 14,
+ 16, 18, 19, 21, 22, 24, 25, 26,
+ 28, 29, 31, 32, 33, 35, 36, 37,
+ 39, 40, 41, 42, 44, 45, 46, 47,
+ 48, 50, 51, 52, 53, 54, 56, 57,
+ 58, 59, 60, 61, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161,
+ 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 177, 178, 179, 180, 181, 182,
+ 183, 183, 184, 185, 186, 187, 188, 189,
+ 190, 190, 191, 192, 193, 194, 195, 196,
+ 196, 197, 198, 199, 200, 201, 202, 202,
+ 203, 204, 205, 206, 207, 207, 208, 209,
+ 210, 211, 212, 212, 213, 214, 215, 216,
+ 217, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 227, 228, 229,
+ 230, 231, 232, 232, 233, 234, 235, 236,
+ 236, 237, 238, 239, 240, 240, 241, 242,
+ 243, 244, 245, 245, 246, 247, 248, 249,
+ 249, 250, 251, 252, 253, 253, 254, 255,
+};
+
+static const u16 xgamma8_09[GAMMA8_TABLE_LENGTH] = {
+ 0, 2, 3, 5, 6, 7, 9, 10,
+ 11, 13, 14, 15, 16, 18, 19, 20,
+ 21, 22, 23, 25, 26, 27, 28, 29,
+ 30, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241,
+ 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_10[GAMMA8_TABLE_LENGTH] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_11[GAMMA8_TABLE_LENGTH] = {
+ 0, 1, 1, 2, 3, 3, 4, 5,
+ 6, 6, 7, 8, 9, 10, 10, 11,
+ 12, 13, 14, 15, 16, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237,
+ 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_12[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 5, 5, 6, 7, 7, 8, 9,
+ 9, 10, 11, 11, 12, 13, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20,
+ 21, 22, 23, 24, 24, 25, 26, 27,
+ 28, 28, 29, 30, 31, 32, 33, 34,
+ 34, 35, 36, 37, 38, 39, 40, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153,
+ 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 175, 176, 177, 178, 179, 180,
+ 181, 183, 184, 185, 186, 187, 188, 189,
+ 191, 192, 193, 194, 195, 196, 197, 199,
+ 200, 201, 202, 203, 204, 205, 207, 208,
+ 209, 210, 211, 212, 214, 215, 216, 217,
+ 218, 219, 221, 222, 223, 224, 225, 226,
+ 228, 229, 230, 231, 232, 234, 235, 236,
+ 237, 238, 239, 241, 242, 243, 244, 245,
+ 247, 248, 249, 250, 251, 253, 254, 255,
+};
+
+static const u16 xgamma8_13[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 2, 2, 2,
+ 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 14, 14, 15, 16, 16,
+ 17, 18, 19, 19, 20, 21, 21, 22,
+ 23, 24, 24, 25, 26, 27, 28, 28,
+ 29, 30, 31, 31, 32, 33, 34, 35,
+ 36, 36, 37, 38, 39, 40, 41, 41,
+ 42, 43, 44, 45, 46, 47, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 135, 136, 137, 138,
+ 139, 140, 141, 143, 144, 145, 146, 147,
+ 148, 149, 151, 152, 153, 154, 155, 156,
+ 157, 159, 160, 161, 162, 163, 164, 166,
+ 167, 168, 169, 170, 172, 173, 174, 175,
+ 176, 178, 179, 180, 181, 182, 184, 185,
+ 186, 187, 188, 190, 191, 192, 193, 194,
+ 196, 197, 198, 199, 201, 202, 203, 204,
+ 206, 207, 208, 209, 210, 212, 213, 214,
+ 215, 217, 218, 219, 220, 222, 223, 224,
+ 226, 227, 228, 229, 231, 232, 233, 234,
+ 236, 237, 238, 240, 241, 242, 243, 245,
+ 246, 247, 249, 250, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_14[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 1, 1, 2,
+ 2, 2, 3, 3, 4, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 12, 12, 13, 13,
+ 14, 15, 15, 16, 16, 17, 18, 18,
+ 19, 20, 20, 21, 22, 22, 23, 24,
+ 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36,
+ 37, 38, 38, 39, 40, 41, 42, 43,
+ 43, 44, 45, 46, 47, 48, 49, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113,
+ 115, 116, 117, 118, 119, 120, 121, 122,
+ 124, 125, 126, 127, 128, 129, 130, 132,
+ 133, 134, 135, 136, 137, 139, 140, 141,
+ 142, 143, 145, 146, 147, 148, 149, 151,
+ 152, 153, 154, 155, 157, 158, 159, 160,
+ 161, 163, 164, 165, 166, 168, 169, 170,
+ 171, 173, 174, 175, 176, 178, 179, 180,
+ 181, 183, 184, 185, 187, 188, 189, 190,
+ 192, 193, 194, 196, 197, 198, 200, 201,
+ 202, 203, 205, 206, 207, 209, 210, 211,
+ 213, 214, 215, 217, 218, 219, 221, 222,
+ 223, 225, 226, 227, 229, 230, 232, 233,
+ 234, 236, 237, 238, 240, 241, 242, 244,
+ 245, 247, 248, 249, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_15[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 2, 2, 3, 3, 3, 4,
+ 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11,
+ 11, 12, 12, 13, 14, 14, 15, 15,
+ 16, 16, 17, 18, 18, 19, 20, 20,
+ 21, 21, 22, 23, 23, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 31, 31,
+ 32, 33, 34, 34, 35, 36, 37, 37,
+ 38, 39, 40, 41, 41, 42, 43, 44,
+ 45, 46, 46, 47, 48, 49, 50, 51,
+ 52, 53, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 112, 113, 114, 115, 116,
+ 117, 119, 120, 121, 122, 123, 124, 126,
+ 127, 128, 129, 130, 132, 133, 134, 135,
+ 136, 138, 139, 140, 141, 142, 144, 145,
+ 146, 147, 149, 150, 151, 152, 154, 155,
+ 156, 158, 159, 160, 161, 163, 164, 165,
+ 167, 168, 169, 171, 172, 173, 174, 176,
+ 177, 178, 180, 181, 182, 184, 185, 187,
+ 188, 189, 191, 192, 193, 195, 196, 197,
+ 199, 200, 202, 203, 204, 206, 207, 209,
+ 210, 211, 213, 214, 216, 217, 218, 220,
+ 221, 223, 224, 226, 227, 228, 230, 231,
+ 233, 234, 236, 237, 239, 240, 242, 243,
+ 245, 246, 248, 249, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_16[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 3,
+ 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 18, 18, 19, 19, 20, 21, 21, 22,
+ 23, 23, 24, 25, 25, 26, 27, 27,
+ 28, 29, 29, 30, 31, 31, 32, 33,
+ 34, 34, 35, 36, 37, 38, 38, 39,
+ 40, 41, 42, 42, 43, 44, 45, 46,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 97, 98, 99, 100, 101,
+ 102, 103, 104, 106, 107, 108, 109, 110,
+ 111, 113, 114, 115, 116, 117, 119, 120,
+ 121, 122, 123, 125, 126, 127, 128, 130,
+ 131, 132, 133, 135, 136, 137, 138, 140,
+ 141, 142, 143, 145, 146, 147, 149, 150,
+ 151, 153, 154, 155, 157, 158, 159, 161,
+ 162, 163, 165, 166, 167, 169, 170, 171,
+ 173, 174, 176, 177, 178, 180, 181, 183,
+ 184, 185, 187, 188, 190, 191, 193, 194,
+ 196, 197, 198, 200, 201, 203, 204, 206,
+ 207, 209, 210, 212, 213, 215, 216, 218,
+ 219, 221, 222, 224, 225, 227, 228, 230,
+ 231, 233, 235, 236, 238, 239, 241, 242,
+ 244, 245, 247, 249, 250, 252, 253, 255,
+};
+
+static const u16 xgamma8_17[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 3, 3, 3, 3, 4, 4, 4,
+ 5, 5, 5, 6, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 13, 13, 14, 14,
+ 15, 15, 16, 17, 17, 18, 18, 19,
+ 19, 20, 21, 21, 22, 22, 23, 24,
+ 24, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 33, 33, 34, 35,
+ 36, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 97, 98, 99, 100, 101, 102, 103, 105,
+ 106, 107, 108, 109, 111, 112, 113, 114,
+ 115, 117, 118, 119, 120, 122, 123, 124,
+ 125, 127, 128, 129, 131, 132, 133, 134,
+ 136, 137, 138, 140, 141, 142, 144, 145,
+ 146, 148, 149, 151, 152, 153, 155, 156,
+ 157, 159, 160, 162, 163, 164, 166, 167,
+ 169, 170, 172, 173, 174, 176, 177, 179,
+ 180, 182, 183, 185, 186, 188, 189, 191,
+ 192, 194, 195, 197, 198, 200, 201, 203,
+ 205, 206, 208, 209, 211, 212, 214, 216,
+ 217, 219, 220, 222, 224, 225, 227, 228,
+ 230, 232, 233, 235, 237, 238, 240, 242,
+ 243, 245, 247, 248, 250, 252, 253, 255,
+};
+
+static const u16 xgamma8_18[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 8, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 21,
+ 21, 22, 22, 23, 24, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 30, 31,
+ 32, 32, 33, 34, 35, 35, 36, 37,
+ 38, 38, 39, 40, 41, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 86, 87, 88, 89, 90,
+ 91, 92, 93, 95, 96, 97, 98, 99,
+ 100, 102, 103, 104, 105, 107, 108, 109,
+ 110, 111, 113, 114, 115, 116, 118, 119,
+ 120, 122, 123, 124, 126, 127, 128, 129,
+ 131, 132, 134, 135, 136, 138, 139, 140,
+ 142, 143, 145, 146, 147, 149, 150, 152,
+ 153, 154, 156, 157, 159, 160, 162, 163,
+ 165, 166, 168, 169, 171, 172, 174, 175,
+ 177, 178, 180, 181, 183, 184, 186, 188,
+ 189, 191, 192, 194, 195, 197, 199, 200,
+ 202, 204, 205, 207, 208, 210, 212, 213,
+ 215, 217, 218, 220, 222, 224, 225, 227,
+ 229, 230, 232, 234, 236, 237, 239, 241,
+ 243, 244, 246, 248, 250, 251, 253, 255,
+};
+
+static const u16 xgamma8_19[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 4, 4, 4, 4, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 24, 24, 25, 26, 26, 27, 28,
+ 28, 29, 30, 30, 31, 32, 32, 33,
+ 34, 35, 35, 36, 37, 38, 38, 39,
+ 40, 41, 41, 42, 43, 44, 45, 46,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 81, 82, 83, 84, 85,
+ 86, 87, 88, 90, 91, 92, 93, 94,
+ 95, 97, 98, 99, 100, 101, 103, 104,
+ 105, 106, 108, 109, 110, 112, 113, 114,
+ 115, 117, 118, 119, 121, 122, 123, 125,
+ 126, 127, 129, 130, 132, 133, 134, 136,
+ 137, 139, 140, 141, 143, 144, 146, 147,
+ 149, 150, 152, 153, 155, 156, 158, 159,
+ 161, 162, 164, 165, 167, 168, 170, 172,
+ 173, 175, 176, 178, 180, 181, 183, 184,
+ 186, 188, 189, 191, 193, 194, 196, 198,
+ 199, 201, 203, 204, 206, 208, 210, 211,
+ 213, 215, 217, 218, 220, 222, 224, 225,
+ 227, 229, 231, 233, 235, 236, 238, 240,
+ 242, 244, 246, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_20[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 20, 21, 21, 22, 23, 23, 24, 24,
+ 25, 26, 26, 27, 28, 28, 29, 30,
+ 30, 31, 32, 32, 33, 34, 35, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 42, 43, 44, 45, 46, 47, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 84, 85, 86, 87, 88, 89,
+ 91, 92, 93, 94, 95, 97, 98, 99,
+ 100, 102, 103, 104, 105, 107, 108, 109,
+ 111, 112, 113, 115, 116, 117, 119, 120,
+ 121, 123, 124, 126, 127, 128, 130, 131,
+ 133, 134, 136, 137, 139, 140, 142, 143,
+ 145, 146, 148, 149, 151, 152, 154, 155,
+ 157, 158, 160, 162, 163, 165, 166, 168,
+ 170, 171, 173, 175, 176, 178, 180, 181,
+ 183, 185, 186, 188, 190, 192, 193, 195,
+ 197, 199, 200, 202, 204, 206, 207, 209,
+ 211, 213, 215, 217, 218, 220, 222, 224,
+ 226, 228, 230, 232, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_21[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 11, 11, 11, 12, 12, 13, 13, 14,
+ 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 20, 20, 21, 21, 22,
+ 22, 23, 24, 24, 25, 25, 26, 27,
+ 27, 28, 29, 29, 30, 31, 31, 32,
+ 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 44,
+ 45, 46, 47, 48, 49, 50, 51, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 75, 76,
+ 77, 78, 79, 80, 81, 83, 84, 85,
+ 86, 87, 88, 90, 91, 92, 93, 95,
+ 96, 97, 98, 100, 101, 102, 104, 105,
+ 106, 107, 109, 110, 112, 113, 114, 116,
+ 117, 118, 120, 121, 123, 124, 126, 127,
+ 129, 130, 131, 133, 134, 136, 137, 139,
+ 141, 142, 144, 145, 147, 148, 150, 151,
+ 153, 155, 156, 158, 160, 161, 163, 165,
+ 166, 168, 170, 171, 173, 175, 176, 178,
+ 180, 182, 183, 185, 187, 189, 191, 192,
+ 194, 196, 198, 200, 202, 203, 205, 207,
+ 209, 211, 213, 215, 217, 219, 221, 223,
+ 225, 226, 228, 230, 232, 234, 236, 238,
+ 241, 243, 245, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_22[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 25, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 33, 33, 34, 35,
+ 35, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 49, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 73, 74, 75, 76, 77, 78, 79, 81,
+ 82, 83, 84, 85, 87, 88, 89, 90,
+ 91, 93, 94, 95, 97, 98, 99, 100,
+ 102, 103, 105, 106, 107, 109, 110, 111,
+ 113, 114, 116, 117, 119, 120, 121, 123,
+ 124, 126, 127, 129, 130, 132, 133, 135,
+ 137, 138, 140, 141, 143, 145, 146, 148,
+ 149, 151, 153, 154, 156, 158, 159, 161,
+ 163, 165, 166, 168, 170, 172, 173, 175,
+ 177, 179, 181, 182, 184, 186, 188, 190,
+ 192, 194, 196, 197, 199, 201, 203, 205,
+ 207, 209, 211, 213, 215, 217, 219, 221,
+ 223, 225, 227, 229, 231, 234, 236, 238,
+ 240, 242, 244, 246, 248, 251, 253, 255,
+};
+
+static const u16 xgamma8_23[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 11, 12, 12, 13, 13, 13,
+ 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 19, 20, 20, 21, 21,
+ 22, 23, 23, 24, 24, 25, 26, 26,
+ 27, 28, 28, 29, 30, 30, 31, 32,
+ 32, 33, 34, 35, 35, 36, 37, 38,
+ 38, 39, 40, 41, 42, 42, 43, 44,
+ 45, 46, 47, 48, 49, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 78, 79, 80, 81, 82, 84, 85, 86,
+ 87, 89, 90, 91, 92, 94, 95, 96,
+ 98, 99, 100, 102, 103, 104, 106, 107,
+ 109, 110, 112, 113, 114, 116, 117, 119,
+ 120, 122, 123, 125, 126, 128, 130, 131,
+ 133, 134, 136, 138, 139, 141, 143, 144,
+ 146, 148, 149, 151, 153, 154, 156, 158,
+ 160, 161, 163, 165, 167, 169, 170, 172,
+ 174, 176, 178, 180, 182, 183, 185, 187,
+ 189, 191, 193, 195, 197, 199, 201, 203,
+ 205, 207, 209, 211, 213, 215, 218, 220,
+ 222, 224, 226, 228, 230, 233, 235, 237,
+ 239, 241, 244, 246, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_24[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 24, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 32, 33, 34, 35,
+ 35, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 53, 54, 55,
+ 56, 57, 58, 59, 60, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 73,
+ 74, 75, 76, 77, 78, 80, 81, 82,
+ 83, 85, 86, 87, 88, 90, 91, 92,
+ 94, 95, 96, 98, 99, 100, 102, 103,
+ 105, 106, 108, 109, 111, 112, 114, 115,
+ 117, 118, 120, 121, 123, 124, 126, 127,
+ 129, 131, 132, 134, 136, 137, 139, 141,
+ 142, 144, 146, 148, 149, 151, 153, 155,
+ 156, 158, 160, 162, 164, 166, 167, 169,
+ 171, 173, 175, 177, 179, 181, 183, 185,
+ 187, 189, 191, 193, 195, 197, 199, 201,
+ 203, 205, 207, 210, 212, 214, 216, 218,
+ 220, 223, 225, 227, 229, 232, 234, 236,
+ 239, 241, 243, 246, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_25[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 7, 7, 7, 7, 8,
+ 8, 8, 9, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 19, 20, 20, 21, 22,
+ 22, 23, 23, 24, 25, 25, 26, 26,
+ 27, 28, 28, 29, 30, 30, 31, 32,
+ 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 67, 68, 69,
+ 70, 71, 72, 73, 75, 76, 77, 78,
+ 80, 81, 82, 83, 85, 86, 87, 89,
+ 90, 91, 93, 94, 95, 97, 98, 99,
+ 101, 102, 104, 105, 107, 108, 110, 111,
+ 113, 114, 116, 117, 119, 121, 122, 124,
+ 125, 127, 129, 130, 132, 134, 135, 137,
+ 139, 141, 142, 144, 146, 148, 150, 151,
+ 153, 155, 157, 159, 161, 163, 165, 166,
+ 168, 170, 172, 174, 176, 178, 180, 182,
+ 184, 186, 189, 191, 193, 195, 197, 199,
+ 201, 204, 206, 208, 210, 212, 215, 217,
+ 219, 221, 224, 226, 228, 231, 233, 235,
+ 238, 240, 243, 245, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_26[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 20, 21, 21, 22, 22, 23, 24, 24,
+ 25, 25, 26, 27, 27, 28, 29, 29,
+ 30, 31, 31, 32, 33, 34, 34, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 68, 69, 70, 71, 72, 73, 75,
+ 76, 77, 78, 80, 81, 82, 84, 85,
+ 86, 88, 89, 90, 92, 93, 94, 96,
+ 97, 99, 100, 102, 103, 105, 106, 108,
+ 109, 111, 112, 114, 115, 117, 119, 120,
+ 122, 124, 125, 127, 129, 130, 132, 134,
+ 136, 137, 139, 141, 143, 145, 146, 148,
+ 150, 152, 154, 156, 158, 160, 162, 164,
+ 166, 168, 170, 172, 174, 176, 178, 180,
+ 182, 184, 186, 188, 191, 193, 195, 197,
+ 199, 202, 204, 206, 209, 211, 213, 215,
+ 218, 220, 223, 225, 227, 230, 232, 235,
+ 237, 240, 242, 245, 247, 250, 252, 255,
+};
+
+static const u16 xgamma8_27[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6,
+ 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 21, 22,
+ 23, 23, 24, 24, 25, 26, 26, 27,
+ 28, 28, 29, 30, 30, 31, 32, 33,
+ 33, 34, 35, 36, 36, 37, 38, 39,
+ 40, 41, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 51, 52, 53,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 68, 69, 70, 71,
+ 72, 74, 75, 76, 77, 79, 80, 81,
+ 83, 84, 85, 87, 88, 89, 91, 92,
+ 94, 95, 97, 98, 100, 101, 103, 104,
+ 106, 107, 109, 110, 112, 114, 115, 117,
+ 119, 120, 122, 124, 125, 127, 129, 131,
+ 132, 134, 136, 138, 140, 141, 143, 145,
+ 147, 149, 151, 153, 155, 157, 159, 161,
+ 163, 165, 167, 169, 171, 173, 175, 178,
+ 180, 182, 184, 186, 188, 191, 193, 195,
+ 198, 200, 202, 205, 207, 209, 212, 214,
+ 216, 219, 221, 224, 226, 229, 231, 234,
+ 237, 239, 242, 244, 247, 250, 252, 255,
+};
+
+static const u16 xgamma8_28[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7,
+ 7, 8, 8, 8, 9, 9, 9, 10,
+ 10, 10, 11, 11, 11, 12, 12, 13,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 20,
+ 21, 21, 22, 22, 23, 24, 24, 25,
+ 25, 26, 27, 27, 28, 29, 29, 30,
+ 31, 32, 32, 33, 34, 35, 35, 36,
+ 37, 38, 39, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 50,
+ 51, 52, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 66, 67, 68,
+ 69, 70, 72, 73, 74, 75, 77, 78,
+ 79, 81, 82, 83, 85, 86, 87, 89,
+ 90, 92, 93, 95, 96, 98, 99, 101,
+ 102, 104, 105, 107, 109, 110, 112, 114,
+ 115, 117, 119, 120, 122, 124, 126, 127,
+ 129, 131, 133, 135, 137, 138, 140, 142,
+ 144, 146, 148, 150, 152, 154, 156, 158,
+ 160, 162, 164, 167, 169, 171, 173, 175,
+ 177, 180, 182, 184, 186, 189, 191, 193,
+ 196, 198, 200, 203, 205, 208, 210, 213,
+ 215, 218, 220, 223, 225, 228, 231, 233,
+ 236, 239, 241, 244, 247, 249, 252, 255,
+};
+
+static const u16 xgamma8_29[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 9, 10, 10, 11, 11, 11,
+ 12, 12, 12, 13, 13, 14, 14, 15,
+ 15, 15, 16, 16, 17, 17, 18, 18,
+ 19, 19, 20, 21, 21, 22, 22, 23,
+ 23, 24, 25, 25, 26, 27, 27, 28,
+ 29, 29, 30, 31, 32, 32, 33, 34,
+ 35, 35, 36, 37, 38, 39, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 64, 65,
+ 66, 67, 68, 70, 71, 72, 73, 75,
+ 76, 77, 79, 80, 81, 83, 84, 86,
+ 87, 88, 90, 91, 93, 94, 96, 97,
+ 99, 101, 102, 104, 105, 107, 109, 110,
+ 112, 114, 115, 117, 119, 121, 122, 124,
+ 126, 128, 130, 132, 134, 135, 137, 139,
+ 141, 143, 145, 147, 149, 151, 153, 155,
+ 158, 160, 162, 164, 166, 168, 171, 173,
+ 175, 177, 180, 182, 184, 187, 189, 191,
+ 194, 196, 199, 201, 204, 206, 209, 211,
+ 214, 216, 219, 222, 224, 227, 230, 232,
+ 235, 238, 241, 244, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_30[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 7, 7, 7, 8,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 10, 11, 11, 12, 12, 12, 13, 13,
+ 14, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 20, 21,
+ 22, 22, 23, 23, 24, 25, 25, 26,
+ 27, 27, 28, 29, 29, 30, 31, 32,
+ 32, 33, 34, 35, 35, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 60, 61, 62,
+ 63, 64, 65, 67, 68, 69, 70, 72,
+ 73, 74, 76, 77, 78, 80, 81, 82,
+ 84, 85, 87, 88, 90, 91, 93, 94,
+ 96, 97, 99, 101, 102, 104, 105, 107,
+ 109, 111, 112, 114, 116, 118, 119, 121,
+ 123, 125, 127, 129, 131, 132, 134, 136,
+ 138, 140, 142, 144, 147, 149, 151, 153,
+ 155, 157, 159, 162, 164, 166, 168, 171,
+ 173, 175, 178, 180, 182, 185, 187, 190,
+ 192, 195, 197, 200, 202, 205, 207, 210,
+ 213, 215, 218, 221, 223, 226, 229, 232,
+ 235, 237, 240, 243, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_31[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 12, 13, 13, 14, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 25, 25, 26, 27, 27, 28, 29, 29,
+ 30, 31, 32, 32, 33, 34, 35, 36,
+ 36, 37, 38, 39, 40, 41, 42, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 56, 57, 58, 59,
+ 60, 61, 62, 64, 65, 66, 67, 69,
+ 70, 71, 73, 74, 75, 77, 78, 79,
+ 81, 82, 84, 85, 87, 88, 90, 91,
+ 93, 94, 96, 97, 99, 101, 102, 104,
+ 106, 108, 109, 111, 113, 115, 116, 118,
+ 120, 122, 124, 126, 128, 130, 132, 134,
+ 136, 138, 140, 142, 144, 146, 148, 150,
+ 152, 155, 157, 159, 161, 164, 166, 168,
+ 171, 173, 175, 178, 180, 183, 185, 188,
+ 190, 193, 195, 198, 201, 203, 206, 209,
+ 211, 214, 217, 220, 222, 225, 228, 231,
+ 234, 237, 240, 243, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_32[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 27,
+ 28, 29, 30, 30, 31, 32, 33, 33,
+ 34, 35, 36, 37, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 59, 60, 61, 62, 63, 65, 66,
+ 67, 68, 70, 71, 72, 74, 75, 76,
+ 78, 79, 81, 82, 84, 85, 87, 88,
+ 90, 91, 93, 95, 96, 98, 99, 101,
+ 103, 105, 106, 108, 110, 112, 113, 115,
+ 117, 119, 121, 123, 125, 127, 129, 131,
+ 133, 135, 137, 139, 141, 143, 146, 148,
+ 150, 152, 154, 157, 159, 161, 164, 166,
+ 168, 171, 173, 176, 178, 181, 183, 186,
+ 188, 191, 194, 196, 199, 202, 204, 207,
+ 210, 213, 216, 219, 221, 224, 227, 230,
+ 233, 236, 239, 242, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_33[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 10,
+ 10, 11, 11, 11, 12, 12, 12, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 21,
+ 21, 22, 22, 23, 24, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 31, 31,
+ 32, 33, 34, 34, 35, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 53, 54,
+ 55, 56, 57, 58, 59, 61, 62, 63,
+ 64, 66, 67, 68, 70, 71, 72, 74,
+ 75, 76, 78, 79, 81, 82, 84, 85,
+ 87, 88, 90, 92, 93, 95, 97, 98,
+ 100, 102, 103, 105, 107, 109, 111, 113,
+ 114, 116, 118, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 139, 141, 143, 145,
+ 147, 150, 152, 154, 157, 159, 161, 164,
+ 166, 169, 171, 174, 176, 179, 181, 184,
+ 187, 189, 192, 195, 198, 200, 203, 206,
+ 209, 212, 215, 217, 220, 223, 226, 230,
+ 233, 236, 239, 242, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_34[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 12, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 21, 22, 23, 23, 24,
+ 24, 25, 26, 26, 27, 28, 29, 29,
+ 30, 31, 32, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 55, 56, 57, 58, 59, 60,
+ 62, 63, 64, 66, 67, 68, 70, 71,
+ 72, 74, 75, 77, 78, 80, 81, 83,
+ 84, 86, 87, 89, 90, 92, 94, 95,
+ 97, 99, 101, 102, 104, 106, 108, 110,
+ 112, 114, 115, 117, 119, 121, 123, 125,
+ 128, 130, 132, 134, 136, 138, 141, 143,
+ 145, 147, 150, 152, 154, 157, 159, 162,
+ 164, 167, 169, 172, 174, 177, 180, 182,
+ 185, 188, 190, 193, 196, 199, 202, 205,
+ 208, 210, 213, 216, 219, 223, 226, 229,
+ 232, 235, 238, 242, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_35[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 11, 12, 12, 13, 13, 13, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 28, 29, 30, 30, 31, 32, 33, 34,
+ 35, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 56, 57, 58,
+ 59, 60, 62, 63, 64, 66, 67, 68,
+ 70, 71, 72, 74, 75, 77, 78, 80,
+ 81, 83, 85, 86, 88, 89, 91, 93,
+ 94, 96, 98, 100, 102, 103, 105, 107,
+ 109, 111, 113, 115, 117, 119, 121, 123,
+ 125, 127, 129, 131, 134, 136, 138, 140,
+ 143, 145, 147, 150, 152, 155, 157, 159,
+ 162, 165, 167, 170, 172, 175, 178, 180,
+ 183, 186, 189, 192, 194, 197, 200, 203,
+ 206, 209, 212, 215, 219, 222, 225, 228,
+ 231, 235, 238, 241, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_36[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 8, 9, 9, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 20, 20, 21,
+ 21, 22, 23, 23, 24, 24, 25, 26,
+ 27, 27, 28, 29, 29, 30, 31, 32,
+ 33, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 42, 43, 44, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 56,
+ 57, 58, 59, 61, 62, 63, 64, 66,
+ 67, 69, 70, 71, 73, 74, 76, 77,
+ 79, 80, 82, 83, 85, 87, 88, 90,
+ 92, 94, 95, 97, 99, 101, 103, 104,
+ 106, 108, 110, 112, 114, 116, 118, 120,
+ 122, 125, 127, 129, 131, 133, 136, 138,
+ 140, 143, 145, 147, 150, 152, 155, 157,
+ 160, 162, 165, 168, 170, 173, 176, 179,
+ 181, 184, 187, 190, 193, 196, 199, 202,
+ 205, 208, 211, 214, 218, 221, 224, 227,
+ 231, 234, 237, 241, 244, 248, 251, 255,
+};
+
+static const u16 xgamma8_37[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 24, 24,
+ 25, 26, 26, 27, 28, 28, 29, 30,
+ 31, 32, 32, 33, 34, 35, 36, 37,
+ 38, 39, 39, 40, 41, 42, 43, 44,
+ 45, 47, 48, 49, 50, 51, 52, 53,
+ 54, 56, 57, 58, 59, 61, 62, 63,
+ 65, 66, 67, 69, 70, 72, 73, 75,
+ 76, 78, 79, 81, 83, 84, 86, 88,
+ 89, 91, 93, 95, 96, 98, 100, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 120, 122, 124, 127, 129, 131, 133, 136,
+ 138, 140, 143, 145, 148, 150, 153, 155,
+ 158, 160, 163, 166, 169, 171, 174, 177,
+ 180, 183, 186, 188, 191, 194, 198, 201,
+ 204, 207, 210, 213, 217, 220, 223, 227,
+ 230, 233, 237, 241, 244, 248, 251, 255,
+};
+
+static const u16 xgamma8_38[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 15, 15, 15, 16, 16, 17, 18, 18,
+ 19, 19, 20, 20, 21, 21, 22, 23,
+ 23, 24, 25, 25, 26, 27, 28, 28,
+ 29, 30, 31, 31, 32, 33, 34, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 43, 44, 45, 47, 48, 49, 50, 51,
+ 52, 53, 55, 56, 57, 58, 60, 61,
+ 62, 64, 65, 66, 68, 69, 71, 72,
+ 74, 75, 77, 78, 80, 82, 83, 85,
+ 87, 88, 90, 92, 94, 96, 98, 99,
+ 101, 103, 105, 107, 109, 111, 113, 115,
+ 118, 120, 122, 124, 126, 129, 131, 133,
+ 136, 138, 141, 143, 146, 148, 151, 153,
+ 156, 158, 161, 164, 167, 169, 172, 175,
+ 178, 181, 184, 187, 190, 193, 196, 199,
+ 203, 206, 209, 212, 216, 219, 222, 226,
+ 229, 233, 237, 240, 244, 247, 251, 255,
+};
+
+static const u16 xgamma8_39[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 10, 11, 11, 11, 12, 12, 13, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 20, 20, 21, 21,
+ 22, 23, 23, 24, 25, 25, 26, 27,
+ 27, 28, 29, 30, 31, 31, 32, 33,
+ 34, 35, 36, 37, 38, 38, 39, 40,
+ 41, 42, 43, 45, 46, 47, 48, 49,
+ 50, 51, 52, 54, 55, 56, 57, 59,
+ 60, 61, 63, 64, 66, 67, 68, 70,
+ 71, 73, 74, 76, 78, 79, 81, 83,
+ 84, 86, 88, 90, 91, 93, 95, 97,
+ 99, 101, 103, 105, 107, 109, 111, 113,
+ 115, 117, 120, 122, 124, 126, 129, 131,
+ 133, 136, 138, 141, 143, 146, 149, 151,
+ 154, 157, 159, 162, 165, 168, 171, 173,
+ 176, 179, 182, 185, 189, 192, 195, 198,
+ 201, 205, 208, 211, 215, 218, 222, 225,
+ 229, 232, 236, 240, 243, 247, 251, 255,
+};
+
+static const u16 xgamma8_40[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 21, 21, 22, 23, 23, 24, 25, 25,
+ 26, 27, 27, 28, 29, 30, 31, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 52, 53, 54, 55, 57,
+ 58, 59, 61, 62, 63, 65, 66, 68,
+ 69, 71, 72, 74, 75, 77, 79, 80,
+ 82, 84, 85, 87, 89, 91, 93, 95,
+ 96, 98, 100, 102, 104, 107, 109, 111,
+ 113, 115, 117, 120, 122, 124, 126, 129,
+ 131, 134, 136, 139, 141, 144, 146, 149,
+ 152, 155, 157, 160, 163, 166, 169, 172,
+ 175, 178, 181, 184, 187, 190, 194, 197,
+ 200, 203, 207, 210, 214, 217, 221, 224,
+ 228, 232, 236, 239, 243, 247, 251, 255,
+};
+
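+/*
+ * Curve lookup by gamma step: entry N of this array presumably selects
+ * gamma (N + 1) / 10, covering gamma 0.1 through 4.0 in steps of 0.1
+ * across the 40 tables above, so GAMMA_CURVE_LENGTH is expected to be 40.
+ */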
+static const u16 *xgamma8_curves[GAMMA_CURVE_LENGTH] = {
+ &xgamma8_01[0],
+ &xgamma8_02[0],
+ &xgamma8_03[0],
+ &xgamma8_04[0],
+ &xgamma8_05[0],
+ &xgamma8_06[0],
+ &xgamma8_07[0],
+ &xgamma8_08[0],
+ &xgamma8_09[0],
+ &xgamma8_10[0],
+ &xgamma8_11[0],
+ &xgamma8_12[0],
+ &xgamma8_13[0],
+ &xgamma8_14[0],
+ &xgamma8_15[0],
+ &xgamma8_16[0],
+ &xgamma8_17[0],
+ &xgamma8_18[0],
+ &xgamma8_19[0],
+ &xgamma8_20[0],
+ &xgamma8_21[0],
+ &xgamma8_22[0],
+ &xgamma8_23[0],
+ &xgamma8_24[0],
+ &xgamma8_25[0],
+ &xgamma8_26[0],
+ &xgamma8_27[0],
+ &xgamma8_28[0],
+ &xgamma8_29[0],
+ &xgamma8_30[0],
+ &xgamma8_31[0],
+ &xgamma8_32[0],
+ &xgamma8_33[0],
+ &xgamma8_34[0],
+ &xgamma8_35[0],
+ &xgamma8_36[0],
+ &xgamma8_37[0],
+ &xgamma8_38[0],
+ &xgamma8_39[0],
+ &xgamma8_40[0],
+};
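+
+/*
+ * For reference, a table of this family can presumably be regenerated
+ * with a small helper along these lines (illustrative sketch only, not
+ * part of this patch; "gamma" is the NN / 10 exponent):
+ *
+ *	for (i = 0; i < GAMMA8_TABLE_LENGTH; i++)
+ *		lut[i] = (u16)(255.0 * pow((double)i / 255.0, gamma) + 0.5);
+ */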
+
+#define GAMMA_BPC_10 (10)
+#define GAMMA10_TABLE_LENGTH BIT(GAMMA_BPC_10)
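+
+/*
+ * 10-bit variants of the same family: xgamma10_NN appears to hold
+ * round(1023 * (in / 1023)^(NN / 10)) for in = 0..1023. For example,
+ * xgamma10_01 below starts 0, 512, ... since 1023 * (1 / 1023)^0.1 is
+ * roughly 511.6.
+ */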
+static const u16 xgamma10_01[GAMMA10_TABLE_LENGTH] = {
+ 0, 512, 548, 571, 588, 601, 612, 621, 630, 637, 644,
+ 650, 656, 661, 666, 671, 675, 679, 683, 687, 690, 694,
+ 697, 700, 703, 706, 709, 711, 714, 716, 719, 721, 723,
+ 726, 728, 730, 732, 734, 736, 738, 740, 742, 743, 745,
+ 747, 749, 750, 752, 753, 755, 756, 758, 759, 761, 762,
+ 764, 765, 766, 768, 769, 770, 772, 773, 774, 775, 777,
+ 778, 779, 780, 781, 782, 783, 785, 786, 787, 788, 789,
+ 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800,
+ 800, 801, 802, 803, 804, 805, 806, 807, 807, 808, 809,
+ 810, 811, 812, 812, 813, 814, 815, 815, 816, 817, 818,
+ 819, 819, 820, 821, 821, 822, 823, 824, 824, 825, 826,
+ 826, 827, 828, 828, 829, 830, 830, 831, 832, 832, 833,
+ 834, 834, 835, 835, 836, 837, 837, 838, 838, 839, 840,
+ 840, 841, 841, 842, 843, 843, 844, 844, 845, 845, 846,
+ 847, 847, 848, 848, 849, 849, 850, 850, 851, 851, 852,
+ 852, 853, 853, 854, 854, 855, 855, 856, 856, 857, 857,
+ 858, 858, 859, 859, 860, 860, 861, 861, 862, 862, 863,
+ 863, 864, 864, 864, 865, 865, 866, 866, 867, 867, 868,
+ 868, 869, 869, 869, 870, 870, 871, 871, 872, 872, 872,
+ 873, 873, 874, 874, 874, 875, 875, 876, 876, 876, 877,
+ 877, 878, 878, 878, 879, 879, 880, 880, 880, 881, 881,
+ 882, 882, 882, 883, 883, 883, 884, 884, 885, 885, 885,
+ 886, 886, 886, 887, 887, 887, 888, 888, 889, 889, 889,
+ 890, 890, 890, 891, 891, 891, 892, 892, 892, 893, 893,
+ 893, 894, 894, 894, 895, 895, 895, 896, 896, 896, 897,
+ 897, 897, 898, 898, 898, 899, 899, 899, 900, 900, 900,
+ 901, 901, 901, 902, 902, 902, 902, 903, 903, 903, 904,
+ 904, 904, 905, 905, 905, 906, 906, 906, 906, 907, 907,
+ 907, 908, 908, 908, 908, 909, 909, 909, 910, 910, 910,
+ 910, 911, 911, 911, 912, 912, 912, 912, 913, 913, 913,
+ 914, 914, 914, 914, 915, 915, 915, 915, 916, 916, 916,
+ 917, 917, 917, 917, 918, 918, 918, 918, 919, 919, 919,
+ 919, 920, 920, 920, 921, 921, 921, 921, 922, 922, 922,
+ 922, 923, 923, 923, 923, 924, 924, 924, 924, 925, 925,
+ 925, 925, 926, 926, 926, 926, 927, 927, 927, 927, 928,
+ 928, 928, 928, 928, 929, 929, 929, 929, 930, 930, 930,
+ 930, 931, 931, 931, 931, 932, 932, 932, 932, 932, 933,
+ 933, 933, 933, 934, 934, 934, 934, 935, 935, 935, 935,
+ 935, 936, 936, 936, 936, 937, 937, 937, 937, 937, 938,
+ 938, 938, 938, 939, 939, 939, 939, 939, 940, 940, 940,
+ 940, 940, 941, 941, 941, 941, 942, 942, 942, 942, 942,
+ 943, 943, 943, 943, 943, 944, 944, 944, 944, 944, 945,
+ 945, 945, 945, 945, 946, 946, 946, 946, 946, 947, 947,
+ 947, 947, 947, 948, 948, 948, 948, 948, 949, 949, 949,
+ 949, 949, 950, 950, 950, 950, 950, 951, 951, 951, 951,
+ 951, 952, 952, 952, 952, 952, 953, 953, 953, 953, 953,
+ 953, 954, 954, 954, 954, 954, 955, 955, 955, 955, 955,
+ 956, 956, 956, 956, 956, 956, 957, 957, 957, 957, 957,
+ 958, 958, 958, 958, 958, 958, 959, 959, 959, 959, 959,
+ 960, 960, 960, 960, 960, 960, 961, 961, 961, 961, 961,
+ 961, 962, 962, 962, 962, 962, 962, 963, 963, 963, 963,
+ 963, 964, 964, 964, 964, 964, 964, 965, 965, 965, 965,
+ 965, 965, 966, 966, 966, 966, 966, 966, 967, 967, 967,
+ 967, 967, 967, 968, 968, 968, 968, 968, 968, 969, 969,
+ 969, 969, 969, 969, 970, 970, 970, 970, 970, 970, 970,
+ 971, 971, 971, 971, 971, 971, 972, 972, 972, 972, 972,
+ 972, 973, 973, 973, 973, 973, 973, 974, 974, 974, 974,
+ 974, 974, 974, 975, 975, 975, 975, 975, 975, 976, 976,
+ 976, 976, 976, 976, 976, 977, 977, 977, 977, 977, 977,
+ 977, 978, 978, 978, 978, 978, 978, 979, 979, 979, 979,
+ 979, 979, 979, 980, 980, 980, 980, 980, 980, 980, 981,
+ 981, 981, 981, 981, 981, 981, 982, 982, 982, 982, 982,
+ 982, 982, 983, 983, 983, 983, 983, 983, 983, 984, 984,
+ 984, 984, 984, 984, 984, 985, 985, 985, 985, 985, 985,
+ 985, 986, 986, 986, 986, 986, 986, 986, 987, 987, 987,
+ 987, 987, 987, 987, 988, 988, 988, 988, 988, 988, 988,
+ 989, 989, 989, 989, 989, 989, 989, 989, 990, 990, 990,
+ 990, 990, 990, 990, 991, 991, 991, 991, 991, 991, 991,
+ 991, 992, 992, 992, 992, 992, 992, 992, 993, 993, 993,
+ 993, 993, 993, 993, 993, 994, 994, 994, 994, 994, 994,
+ 994, 994, 995, 995, 995, 995, 995, 995, 995, 996, 996,
+ 996, 996, 996, 996, 996, 996, 997, 997, 997, 997, 997,
+ 997, 997, 997, 998, 998, 998, 998, 998, 998, 998, 998,
+ 999, 999, 999, 999, 999, 999, 999, 999, 1000, 1000, 1000,
+ 1000, 1000, 1000, 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1001,
+ 1001, 1001, 1001, 1002, 1002, 1002, 1002, 1002, 1002, 1002, 1002,
+ 1003, 1003, 1003, 1003, 1003, 1003, 1003, 1003, 1004, 1004, 1004,
+ 1004, 1004, 1004, 1004, 1004, 1004, 1005, 1005, 1005, 1005, 1005,
+ 1005, 1005, 1005, 1006, 1006, 1006, 1006, 1006, 1006, 1006, 1006,
+ 1006, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1008,
+ 1008, 1008, 1008, 1008, 1008, 1008, 1008, 1009, 1009, 1009, 1009,
+ 1009, 1009, 1009, 1009, 1009, 1010, 1010, 1010, 1010, 1010, 1010,
+ 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1011, 1011, 1011, 1011,
+ 1011, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1013,
+ 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1014, 1014, 1014,
+ 1014, 1014, 1014, 1014, 1014, 1014, 1014, 1015, 1015, 1015, 1015,
+ 1015, 1015, 1015, 1015, 1015, 1016, 1016, 1016, 1016, 1016, 1016,
+ 1016, 1016, 1016, 1017, 1017, 1017, 1017, 1017, 1017, 1017, 1017,
+ 1017, 1017, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018,
+ 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1020,
+ 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1021, 1021,
+ 1021, 1021, 1021, 1021, 1021, 1021, 1021, 1021, 1022, 1022, 1022,
+ 1022, 1022, 1022, 1022, 1022, 1022, 1022, 1023, 1023, 1023, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_02[GAMMA10_TABLE_LENGTH] = {
+ 0, 256, 294, 319, 338, 353, 366, 378, 388, 397, 405,
+ 413, 420, 427, 434, 440, 445, 451, 456, 461, 466, 470,
+ 475, 479, 483, 487, 491, 495, 498, 502, 505, 508, 512,
+ 515, 518, 521, 524, 527, 529, 532, 535, 538, 540, 543,
+ 545, 548, 550, 552, 555, 557, 559, 562, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 589,
+ 591, 593, 595, 597, 598, 600, 602, 603, 605, 607, 608,
+ 610, 611, 613, 614, 616, 618, 619, 621, 622, 623, 625,
+ 626, 628, 629, 631, 632, 633, 635, 636, 637, 639, 640,
+ 641, 643, 644, 645, 646, 648, 649, 650, 651, 653, 654,
+ 655, 656, 657, 658, 660, 661, 662, 663, 664, 665, 666,
+ 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678,
+ 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
+ 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 700, 701, 702, 703, 704, 705, 706, 707, 708, 708, 709,
+ 710, 711, 712, 713, 714, 714, 715, 716, 717, 718, 719,
+ 719, 720, 721, 722, 723, 723, 724, 725, 726, 727, 727,
+ 728, 729, 730, 731, 731, 732, 733, 734, 734, 735, 736,
+ 737, 737, 738, 739, 740, 740, 741, 742, 742, 743, 744,
+ 745, 745, 746, 747, 747, 748, 749, 750, 750, 751, 752,
+ 752, 753, 754, 754, 755, 756, 756, 757, 758, 758, 759,
+ 760, 760, 761, 762, 762, 763, 764, 764, 765, 765, 766,
+ 767, 767, 768, 769, 769, 770, 771, 771, 772, 772, 773,
+ 774, 774, 775, 775, 776, 777, 777, 778, 778, 779, 780,
+ 780, 781, 781, 782, 783, 783, 784, 784, 785, 785, 786,
+ 787, 787, 788, 788, 789, 789, 790, 791, 791, 792, 792,
+ 793, 793, 794, 794, 795, 796, 796, 797, 797, 798, 798,
+ 799, 799, 800, 800, 801, 801, 802, 803, 803, 804, 804,
+ 805, 805, 806, 806, 807, 807, 808, 808, 809, 809, 810,
+ 810, 811, 811, 812, 812, 813, 813, 814, 814, 815, 815,
+ 816, 816, 817, 817, 818, 818, 819, 819, 820, 820, 821,
+ 821, 822, 822, 823, 823, 824, 824, 825, 825, 825, 826,
+ 826, 827, 827, 828, 828, 829, 829, 830, 830, 831, 831,
+ 832, 832, 832, 833, 833, 834, 834, 835, 835, 836, 836,
+ 837, 837, 837, 838, 838, 839, 839, 840, 840, 841, 841,
+ 841, 842, 842, 843, 843, 844, 844, 844, 845, 845, 846,
+ 846, 847, 847, 847, 848, 848, 849, 849, 850, 850, 850,
+ 851, 851, 852, 852, 852, 853, 853, 854, 854, 855, 855,
+ 855, 856, 856, 857, 857, 857, 858, 858, 859, 859, 859,
+ 860, 860, 861, 861, 861, 862, 862, 863, 863, 863, 864,
+ 864, 865, 865, 865, 866, 866, 866, 867, 867, 868, 868,
+ 868, 869, 869, 870, 870, 870, 871, 871, 871, 872, 872,
+ 873, 873, 873, 874, 874, 875, 875, 875, 876, 876, 876,
+ 877, 877, 877, 878, 878, 879, 879, 879, 880, 880, 880,
+ 881, 881, 882, 882, 882, 883, 883, 883, 884, 884, 884,
+ 885, 885, 885, 886, 886, 887, 887, 887, 888, 888, 888,
+ 889, 889, 889, 890, 890, 890, 891, 891, 891, 892, 892,
+ 892, 893, 893, 894, 894, 894, 895, 895, 895, 896, 896,
+ 896, 897, 897, 897, 898, 898, 898, 899, 899, 899, 900,
+ 900, 900, 901, 901, 901, 902, 902, 902, 903, 903, 903,
+ 904, 904, 904, 905, 905, 905, 906, 906, 906, 907, 907,
+ 907, 908, 908, 908, 908, 909, 909, 909, 910, 910, 910,
+ 911, 911, 911, 912, 912, 912, 913, 913, 913, 914, 914,
+ 914, 914, 915, 915, 915, 916, 916, 916, 917, 917, 917,
+ 918, 918, 918, 919, 919, 919, 919, 920, 920, 920, 921,
+ 921, 921, 922, 922, 922, 923, 923, 923, 923, 924, 924,
+ 924, 925, 925, 925, 926, 926, 926, 926, 927, 927, 927,
+ 928, 928, 928, 928, 929, 929, 929, 930, 930, 930, 931,
+ 931, 931, 931, 932, 932, 932, 933, 933, 933, 933, 934,
+ 934, 934, 935, 935, 935, 935, 936, 936, 936, 937, 937,
+ 937, 937, 938, 938, 938, 939, 939, 939, 939, 940, 940,
+ 940, 941, 941, 941, 941, 942, 942, 942, 942, 943, 943,
+ 943, 944, 944, 944, 944, 945, 945, 945, 946, 946, 946,
+ 946, 947, 947, 947, 947, 948, 948, 948, 949, 949, 949,
+ 949, 950, 950, 950, 950, 951, 951, 951, 951, 952, 952,
+ 952, 953, 953, 953, 953, 954, 954, 954, 954, 955, 955,
+ 955, 955, 956, 956, 956, 956, 957, 957, 957, 958, 958,
+ 958, 958, 959, 959, 959, 959, 960, 960, 960, 960, 961,
+ 961, 961, 961, 962, 962, 962, 962, 963, 963, 963, 963,
+ 964, 964, 964, 964, 965, 965, 965, 965, 966, 966, 966,
+ 966, 967, 967, 967, 967, 968, 968, 968, 968, 969, 969,
+ 969, 969, 970, 970, 970, 970, 971, 971, 971, 971, 972,
+ 972, 972, 972, 973, 973, 973, 973, 974, 974, 974, 974,
+ 975, 975, 975, 975, 976, 976, 976, 976, 977, 977, 977,
+ 977, 978, 978, 978, 978, 978, 979, 979, 979, 979, 980,
+ 980, 980, 980, 981, 981, 981, 981, 982, 982, 982, 982,
+ 983, 983, 983, 983, 983, 984, 984, 984, 984, 985, 985,
+ 985, 985, 986, 986, 986, 986, 986, 987, 987, 987, 987,
+ 988, 988, 988, 988, 989, 989, 989, 989, 989, 990, 990,
+ 990, 990, 991, 991, 991, 991, 992, 992, 992, 992, 992,
+ 993, 993, 993, 993, 994, 994, 994, 994, 994, 995, 995,
+ 995, 995, 996, 996, 996, 996, 996, 997, 997, 997, 997,
+ 998, 998, 998, 998, 998, 999, 999, 999, 999, 1000, 1000,
+ 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1002, 1002, 1002, 1002,
+ 1002, 1003, 1003, 1003, 1003, 1003, 1004, 1004, 1004, 1004, 1005,
+ 1005, 1005, 1005, 1005, 1006, 1006, 1006, 1006, 1006, 1007, 1007,
+ 1007, 1007, 1008, 1008, 1008, 1008, 1008, 1009, 1009, 1009, 1009,
+ 1009, 1010, 1010, 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1012,
+ 1012, 1012, 1012, 1012, 1013, 1013, 1013, 1013, 1013, 1014, 1014,
+ 1014, 1014, 1014, 1015, 1015, 1015, 1015, 1015, 1016, 1016, 1016,
+ 1016, 1017, 1017, 1017, 1017, 1017, 1018, 1018, 1018, 1018, 1018,
+ 1019, 1019, 1019, 1019, 1019, 1020, 1020, 1020, 1020, 1020, 1021,
+ 1021, 1021, 1021, 1021, 1022, 1022, 1022, 1022, 1022, 1023, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_03[GAMMA10_TABLE_LENGTH] = {
+ 0, 128, 157, 178, 194, 207, 219, 229, 239, 247, 255,
+ 263, 270, 276, 282, 288, 294, 299, 304, 309, 314, 319,
+ 323, 328, 332, 336, 340, 344, 348, 351, 355, 358, 362,
+ 365, 368, 372, 375, 378, 381, 384, 387, 390, 393, 395,
+ 398, 401, 403, 406, 409, 411, 414, 416, 419, 421, 423,
+ 426, 428, 430, 432, 435, 437, 439, 441, 443, 445, 447,
+ 450, 452, 454, 456, 458, 460, 461, 463, 465, 467, 469,
+ 471, 473, 474, 476, 478, 480, 482, 483, 485, 487, 488,
+ 490, 492, 493, 495, 497, 498, 500, 501, 503, 505, 506,
+ 508, 509, 511, 512, 514, 515, 517, 518, 520, 521, 523,
+ 524, 525, 527, 528, 530, 531, 532, 534, 535, 537, 538,
+ 539, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552,
+ 553, 555, 556, 557, 558, 560, 561, 562, 563, 565, 566,
+ 567, 568, 569, 570, 572, 573, 574, 575, 576, 577, 579,
+ 580, 581, 582, 583, 584, 585, 586, 587, 589, 590, 591,
+ 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602,
+ 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613,
+ 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624,
+ 625, 626, 627, 628, 629, 630, 631, 632, 633, 633, 634,
+ 635, 636, 637, 638, 639, 640, 641, 642, 642, 643, 644,
+ 645, 646, 647, 648, 649, 649, 650, 651, 652, 653, 654,
+ 655, 655, 656, 657, 658, 659, 660, 661, 661, 662, 663,
+ 664, 665, 665, 666, 667, 668, 669, 670, 670, 671, 672,
+ 673, 674, 674, 675, 676, 677, 677, 678, 679, 680, 681,
+ 681, 682, 683, 684, 684, 685, 686, 687, 688, 688, 689,
+ 690, 691, 691, 692, 693, 694, 694, 695, 696, 696, 697,
+ 698, 699, 699, 700, 701, 702, 702, 703, 704, 704, 705,
+ 706, 707, 707, 708, 709, 709, 710, 711, 712, 712, 713,
+ 714, 714, 715, 716, 716, 717, 718, 718, 719, 720, 721,
+ 721, 722, 723, 723, 724, 725, 725, 726, 727, 727, 728,
+ 729, 729, 730, 731, 731, 732, 733, 733, 734, 734, 735,
+ 736, 736, 737, 738, 738, 739, 740, 740, 741, 742, 742,
+ 743, 743, 744, 745, 745, 746, 747, 747, 748, 748, 749,
+ 750, 750, 751, 752, 752, 753, 753, 754, 755, 755, 756,
+ 756, 757, 758, 758, 759, 759, 760, 761, 761, 762, 762,
+ 763, 764, 764, 765, 765, 766, 767, 767, 768, 768, 769,
+ 770, 770, 771, 771, 772, 772, 773, 774, 774, 775, 775,
+ 776, 776, 777, 778, 778, 779, 779, 780, 780, 781, 782,
+ 782, 783, 783, 784, 784, 785, 785, 786, 787, 787, 788,
+ 788, 789, 789, 790, 790, 791, 792, 792, 793, 793, 794,
+ 794, 795, 795, 796, 796, 797, 797, 798, 799, 799, 800,
+ 800, 801, 801, 802, 802, 803, 803, 804, 804, 805, 805,
+ 806, 806, 807, 808, 808, 809, 809, 810, 810, 811, 811,
+ 812, 812, 813, 813, 814, 814, 815, 815, 816, 816, 817,
+ 817, 818, 818, 819, 819, 820, 820, 821, 821, 822, 822,
+ 823, 823, 824, 824, 825, 825, 826, 826, 827, 827, 828,
+ 828, 829, 829, 830, 830, 831, 831, 832, 832, 833, 833,
+ 834, 834, 835, 835, 836, 836, 836, 837, 837, 838, 838,
+ 839, 839, 840, 840, 841, 841, 842, 842, 843, 843, 844,
+ 844, 845, 845, 845, 846, 846, 847, 847, 848, 848, 849,
+ 849, 850, 850, 851, 851, 852, 852, 852, 853, 853, 854,
+ 854, 855, 855, 856, 856, 857, 857, 857, 858, 858, 859,
+ 859, 860, 860, 861, 861, 862, 862, 862, 863, 863, 864,
+ 864, 865, 865, 866, 866, 866, 867, 867, 868, 868, 869,
+ 869, 869, 870, 870, 871, 871, 872, 872, 873, 873, 873,
+ 874, 874, 875, 875, 876, 876, 876, 877, 877, 878, 878,
+ 879, 879, 879, 880, 880, 881, 881, 882, 882, 882, 883,
+ 883, 884, 884, 885, 885, 885, 886, 886, 887, 887, 887,
+ 888, 888, 889, 889, 890, 890, 890, 891, 891, 892, 892,
+ 892, 893, 893, 894, 894, 895, 895, 895, 896, 896, 897,
+ 897, 897, 898, 898, 899, 899, 899, 900, 900, 901, 901,
+ 901, 902, 902, 903, 903, 903, 904, 904, 905, 905, 905,
+ 906, 906, 907, 907, 907, 908, 908, 909, 909, 909, 910,
+ 910, 911, 911, 911, 912, 912, 913, 913, 913, 914, 914,
+ 915, 915, 915, 916, 916, 916, 917, 917, 918, 918, 918,
+ 919, 919, 920, 920, 920, 921, 921, 921, 922, 922, 923,
+ 923, 923, 924, 924, 925, 925, 925, 926, 926, 926, 927,
+ 927, 928, 928, 928, 929, 929, 929, 930, 930, 931, 931,
+ 931, 932, 932, 932, 933, 933, 934, 934, 934, 935, 935,
+ 935, 936, 936, 936, 937, 937, 938, 938, 938, 939, 939,
+ 939, 940, 940, 941, 941, 941, 942, 942, 942, 943, 943,
+ 943, 944, 944, 945, 945, 945, 946, 946, 946, 947, 947,
+ 947, 948, 948, 948, 949, 949, 950, 950, 950, 951, 951,
+ 951, 952, 952, 952, 953, 953, 953, 954, 954, 955, 955,
+ 955, 956, 956, 956, 957, 957, 957, 958, 958, 958, 959,
+ 959, 959, 960, 960, 960, 961, 961, 962, 962, 962, 963,
+ 963, 963, 964, 964, 964, 965, 965, 965, 966, 966, 966,
+ 967, 967, 967, 968, 968, 968, 969, 969, 969, 970, 970,
+ 970, 971, 971, 971, 972, 972, 972, 973, 973, 973, 974,
+ 974, 974, 975, 975, 975, 976, 976, 976, 977, 977, 977,
+ 978, 978, 978, 979, 979, 979, 980, 980, 980, 981, 981,
+ 981, 982, 982, 982, 983, 983, 983, 984, 984, 984, 985,
+ 985, 985, 986, 986, 986, 987, 987, 987, 988, 988, 988,
+ 989, 989, 989, 990, 990, 990, 991, 991, 991, 992, 992,
+ 992, 993, 993, 993, 994, 994, 994, 994, 995, 995, 995,
+ 996, 996, 996, 997, 997, 997, 998, 998, 998, 999, 999,
+ 999, 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1002, 1002, 1002,
+ 1003, 1003, 1003, 1004, 1004, 1004, 1005, 1005, 1005, 1006, 1006,
+ 1006, 1006, 1007, 1007, 1007, 1008, 1008, 1008, 1009, 1009, 1009,
+ 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1012, 1012, 1012, 1013,
+ 1013, 1013, 1014, 1014, 1014, 1015, 1015, 1015, 1015, 1016, 1016,
+ 1016, 1017, 1017, 1017, 1018, 1018, 1018, 1018, 1019, 1019, 1019,
+ 1020, 1020, 1020, 1021, 1021, 1021, 1021, 1022, 1022, 1022, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_04[GAMMA10_TABLE_LENGTH] = {
+ 0, 64, 84, 99, 111, 122, 131, 139, 147, 154, 161,
+ 167, 173, 178, 184, 189, 194, 199, 203, 208, 212, 216,
+ 220, 224, 228, 232, 235, 239, 243, 246, 249, 253, 256,
+ 259, 262, 265, 268, 271, 274, 277, 280, 283, 285, 288,
+ 291, 293, 296, 298, 301, 303, 306, 308, 311, 313, 315,
+ 318, 320, 322, 325, 327, 329, 331, 333, 335, 338, 340,
+ 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362,
+ 364, 365, 367, 369, 371, 373, 375, 376, 378, 380, 382,
+ 383, 385, 387, 389, 390, 392, 394, 395, 397, 399, 400,
+ 402, 404, 405, 407, 408, 410, 412, 413, 415, 416, 418,
+ 419, 421, 422, 424, 425, 427, 428, 430, 431, 433, 434,
+ 436, 437, 438, 440, 441, 443, 444, 445, 447, 448, 450,
+ 451, 452, 454, 455, 456, 458, 459, 460, 462, 463, 464,
+ 466, 467, 468, 470, 471, 472, 473, 475, 476, 477, 478,
+ 480, 481, 482, 483, 485, 486, 487, 488, 489, 491, 492,
+ 493, 494, 495, 497, 498, 499, 500, 501, 503, 504, 505,
+ 506, 507, 508, 509, 511, 512, 513, 514, 515, 516, 517,
+ 518, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
+ 530, 531, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+ 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552,
+ 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 591, 592, 593, 594,
+ 595, 596, 597, 598, 599, 600, 600, 601, 602, 603, 604,
+ 605, 606, 607, 607, 608, 609, 610, 611, 612, 613, 614,
+ 614, 615, 616, 617, 618, 619, 620, 620, 621, 622, 623,
+ 624, 625, 625, 626, 627, 628, 629, 630, 630, 631, 632,
+ 633, 634, 635, 635, 636, 637, 638, 639, 639, 640, 641,
+ 642, 643, 643, 644, 645, 646, 647, 647, 648, 649, 650,
+ 651, 651, 652, 653, 654, 655, 655, 656, 657, 658, 658,
+ 659, 660, 661, 662, 662, 663, 664, 665, 665, 666, 667,
+ 668, 668, 669, 670, 671, 671, 672, 673, 674, 674, 675,
+ 676, 677, 677, 678, 679, 680, 680, 681, 682, 683, 683,
+ 684, 685, 685, 686, 687, 688, 688, 689, 690, 691, 691,
+ 692, 693, 693, 694, 695, 696, 696, 697, 698, 698, 699,
+ 700, 701, 701, 702, 703, 703, 704, 705, 705, 706, 707,
+ 708, 708, 709, 710, 710, 711, 712, 712, 713, 714, 714,
+ 715, 716, 717, 717, 718, 719, 719, 720, 721, 721, 722,
+ 723, 723, 724, 725, 725, 726, 727, 727, 728, 729, 729,
+ 730, 731, 731, 732, 733, 733, 734, 735, 735, 736, 737,
+ 737, 738, 739, 739, 740, 740, 741, 742, 742, 743, 744,
+ 744, 745, 746, 746, 747, 748, 748, 749, 749, 750, 751,
+ 751, 752, 753, 753, 754, 755, 755, 756, 756, 757, 758,
+ 758, 759, 760, 760, 761, 761, 762, 763, 763, 764, 765,
+ 765, 766, 766, 767, 768, 768, 769, 769, 770, 771, 771,
+ 772, 773, 773, 774, 774, 775, 776, 776, 777, 777, 778,
+ 779, 779, 780, 780, 781, 782, 782, 783, 783, 784, 785,
+ 785, 786, 786, 787, 788, 788, 789, 789, 790, 791, 791,
+ 792, 792, 793, 793, 794, 795, 795, 796, 796, 797, 798,
+ 798, 799, 799, 800, 800, 801, 802, 802, 803, 803, 804,
+ 804, 805, 806, 806, 807, 807, 808, 808, 809, 810, 810,
+ 811, 811, 812, 812, 813, 814, 814, 815, 815, 816, 816,
+ 817, 818, 818, 819, 819, 820, 820, 821, 821, 822, 823,
+ 823, 824, 824, 825, 825, 826, 826, 827, 827, 828, 829,
+ 829, 830, 830, 831, 831, 832, 832, 833, 834, 834, 835,
+ 835, 836, 836, 837, 837, 838, 838, 839, 839, 840, 841,
+ 841, 842, 842, 843, 843, 844, 844, 845, 845, 846, 846,
+ 847, 847, 848, 849, 849, 850, 850, 851, 851, 852, 852,
+ 853, 853, 854, 854, 855, 855, 856, 856, 857, 857, 858,
+ 859, 859, 860, 860, 861, 861, 862, 862, 863, 863, 864,
+ 864, 865, 865, 866, 866, 867, 867, 868, 868, 869, 869,
+ 870, 870, 871, 871, 872, 872, 873, 873, 874, 874, 875,
+ 875, 876, 876, 877, 877, 878, 878, 879, 879, 880, 880,
+ 881, 881, 882, 882, 883, 883, 884, 884, 885, 885, 886,
+ 886, 887, 887, 888, 888, 889, 889, 890, 890, 891, 891,
+ 892, 892, 893, 893, 894, 894, 895, 895, 896, 896, 897,
+ 897, 898, 898, 899, 899, 900, 900, 901, 901, 902, 902,
+ 903, 903, 904, 904, 905, 905, 905, 906, 906, 907, 907,
+ 908, 908, 909, 909, 910, 910, 911, 911, 912, 912, 913,
+ 913, 914, 914, 915, 915, 915, 916, 916, 917, 917, 918,
+ 918, 919, 919, 920, 920, 921, 921, 922, 922, 923, 923,
+ 923, 924, 924, 925, 925, 926, 926, 927, 927, 928, 928,
+ 929, 929, 929, 930, 930, 931, 931, 932, 932, 933, 933,
+ 934, 934, 935, 935, 935, 936, 936, 937, 937, 938, 938,
+ 939, 939, 940, 940, 940, 941, 941, 942, 942, 943, 943,
+ 944, 944, 945, 945, 945, 946, 946, 947, 947, 948, 948,
+ 949, 949, 949, 950, 950, 951, 951, 952, 952, 953, 953,
+ 953, 954, 954, 955, 955, 956, 956, 957, 957, 957, 958,
+ 958, 959, 959, 960, 960, 961, 961, 961, 962, 962, 963,
+ 963, 964, 964, 965, 965, 965, 966, 966, 967, 967, 968,
+ 968, 968, 969, 969, 970, 970, 971, 971, 971, 972, 972,
+ 973, 973, 974, 974, 974, 975, 975, 976, 976, 977, 977,
+ 977, 978, 978, 979, 979, 980, 980, 980, 981, 981, 982,
+ 982, 983, 983, 983, 984, 984, 985, 985, 986, 986, 986,
+ 987, 987, 988, 988, 989, 989, 989, 990, 990, 991, 991,
+ 991, 992, 992, 993, 993, 994, 994, 994, 995, 995, 996,
+ 996, 996, 997, 997, 998, 998, 999, 999, 999, 1000, 1000,
+ 1001, 1001, 1001, 1002, 1002, 1003, 1003, 1004, 1004, 1004, 1005,
+ 1005, 1006, 1006, 1006, 1007, 1007, 1008, 1008, 1008, 1009, 1009,
+ 1010, 1010, 1010, 1011, 1011, 1012, 1012, 1013, 1013, 1013, 1014,
+ 1014, 1015, 1015, 1015, 1016, 1016, 1017, 1017, 1017, 1018, 1018,
+ 1019, 1019, 1019, 1020, 1020, 1021, 1021, 1021, 1022, 1022, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_05[GAMMA10_TABLE_LENGTH] = {
+ 0, 32, 45, 55, 64, 72, 78, 85, 90, 96, 101,
+ 106, 111, 115, 120, 124, 128, 132, 136, 139, 143, 147,
+ 150, 153, 157, 160, 163, 166, 169, 172, 175, 178, 181,
+ 184, 186, 189, 192, 195, 197, 200, 202, 205, 207, 210,
+ 212, 215, 217, 219, 222, 224, 226, 228, 231, 233, 235,
+ 237, 239, 241, 244, 246, 248, 250, 252, 254, 256, 258,
+ 260, 262, 264, 266, 268, 270, 271, 273, 275, 277, 279,
+ 281, 282, 284, 286, 288, 290, 291, 293, 295, 297, 298,
+ 300, 302, 303, 305, 307, 308, 310, 312, 313, 315, 317,
+ 318, 320, 321, 323, 325, 326, 328, 329, 331, 332, 334,
+ 335, 337, 338, 340, 341, 343, 344, 346, 347, 349, 350,
+ 352, 353, 355, 356, 358, 359, 360, 362, 363, 365, 366,
+ 367, 369, 370, 372, 373, 374, 376, 377, 378, 380, 381,
+ 382, 384, 385, 386, 388, 389, 390, 392, 393, 394, 396,
+ 397, 398, 399, 401, 402, 403, 405, 406, 407, 408, 410,
+ 411, 412, 413, 415, 416, 417, 418, 419, 421, 422, 423,
+ 424, 426, 427, 428, 429, 430, 431, 433, 434, 435, 436,
+ 437, 439, 440, 441, 442, 443, 444, 445, 447, 448, 449,
+ 450, 451, 452, 453, 455, 456, 457, 458, 459, 460, 461,
+ 462, 463, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 477, 478, 479, 480, 481, 482, 483, 484, 485,
+ 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 497,
+ 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519,
+ 520, 521, 522, 523, 524, 525, 526, 527, 527, 528, 529,
+ 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 543, 544, 545, 546, 547, 547, 548, 549, 550,
+ 551, 552, 553, 554, 555, 556, 557, 558, 559, 559, 560,
+ 561, 562, 563, 564, 565, 566, 567, 568, 569, 569, 570,
+ 571, 572, 573, 574, 575, 576, 577, 577, 578, 579, 580,
+ 581, 582, 583, 584, 585, 585, 586, 587, 588, 589, 590,
+ 591, 591, 592, 593, 594, 595, 596, 597, 598, 598, 599,
+ 600, 601, 602, 603, 603, 604, 605, 606, 607, 608, 609,
+ 609, 610, 611, 612, 613, 614, 614, 615, 616, 617, 618,
+ 619, 619, 620, 621, 622, 623, 623, 624, 625, 626, 627,
+ 628, 628, 629, 630, 631, 632, 632, 633, 634, 635, 636,
+ 636, 637, 638, 639, 640, 640, 641, 642, 643, 644, 644,
+ 645, 646, 647, 648, 648, 649, 650, 651, 652, 652, 653,
+ 654, 655, 655, 656, 657, 658, 659, 659, 660, 661, 662,
+ 662, 663, 664, 665, 666, 666, 667, 668, 669, 669, 670,
+ 671, 672, 672, 673, 674, 675, 675, 676, 677, 678, 678,
+ 679, 680, 681, 681, 682, 683, 684, 684, 685, 686, 687,
+ 687, 688, 689, 690, 690, 691, 692, 693, 693, 694, 695,
+ 696, 696, 697, 698, 699, 699, 700, 701, 701, 702, 703,
+ 704, 704, 705, 706, 707, 707, 708, 709, 709, 710, 711,
+ 712, 712, 713, 714, 714, 715, 716, 717, 717, 718, 719,
+ 719, 720, 721, 722, 722, 723, 724, 724, 725, 726, 727,
+ 727, 728, 729, 729, 730, 731, 731, 732, 733, 734, 734,
+ 735, 736, 736, 737, 738, 738, 739, 740, 740, 741, 742,
+ 743, 743, 744, 745, 745, 746, 747, 747, 748, 749, 749,
+ 750, 751, 751, 752, 753, 754, 754, 755, 756, 756, 757,
+ 758, 758, 759, 760, 760, 761, 762, 762, 763, 764, 764,
+ 765, 766, 766, 767, 768, 768, 769, 770, 770, 771, 772,
+ 772, 773, 774, 774, 775, 776, 776, 777, 778, 778, 779,
+ 780, 780, 781, 781, 782, 783, 783, 784, 785, 785, 786,
+ 787, 787, 788, 789, 789, 790, 791, 791, 792, 793, 793,
+ 794, 794, 795, 796, 796, 797, 798, 798, 799, 800, 800,
+ 801, 802, 802, 803, 803, 804, 805, 805, 806, 807, 807,
+ 808, 809, 809, 810, 810, 811, 812, 812, 813, 814, 814,
+ 815, 815, 816, 817, 817, 818, 819, 819, 820, 820, 821,
+ 822, 822, 823, 824, 824, 825, 825, 826, 827, 827, 828,
+ 829, 829, 830, 830, 831, 832, 832, 833, 833, 834, 835,
+ 835, 836, 836, 837, 838, 838, 839, 840, 840, 841, 841,
+ 842, 843, 843, 844, 844, 845, 846, 846, 847, 847, 848,
+ 849, 849, 850, 850, 851, 852, 852, 853, 853, 854, 855,
+ 855, 856, 856, 857, 858, 858, 859, 859, 860, 861, 861,
+ 862, 862, 863, 864, 864, 865, 865, 866, 867, 867, 868,
+ 868, 869, 869, 870, 871, 871, 872, 872, 873, 874, 874,
+ 875, 875, 876, 877, 877, 878, 878, 879, 879, 880, 881,
+ 881, 882, 882, 883, 883, 884, 885, 885, 886, 886, 887,
+ 888, 888, 889, 889, 890, 890, 891, 892, 892, 893, 893,
+ 894, 894, 895, 896, 896, 897, 897, 898, 898, 899, 900,
+ 900, 901, 901, 902, 902, 903, 904, 904, 905, 905, 906,
+ 906, 907, 907, 908, 909, 909, 910, 910, 911, 911, 912,
+ 913, 913, 914, 914, 915, 915, 916, 916, 917, 918, 918,
+ 919, 919, 920, 920, 921, 921, 922, 923, 923, 924, 924,
+ 925, 925, 926, 926, 927, 928, 928, 929, 929, 930, 930,
+ 931, 931, 932, 932, 933, 934, 934, 935, 935, 936, 936,
+ 937, 937, 938, 939, 939, 940, 940, 941, 941, 942, 942,
+ 943, 943, 944, 944, 945, 946, 946, 947, 947, 948, 948,
+ 949, 949, 950, 950, 951, 952, 952, 953, 953, 954, 954,
+ 955, 955, 956, 956, 957, 957, 958, 958, 959, 960, 960,
+ 961, 961, 962, 962, 963, 963, 964, 964, 965, 965, 966,
+ 966, 967, 967, 968, 969, 969, 970, 970, 971, 971, 972,
+ 972, 973, 973, 974, 974, 975, 975, 976, 976, 977, 977,
+ 978, 979, 979, 980, 980, 981, 981, 982, 982, 983, 983,
+ 984, 984, 985, 985, 986, 986, 987, 987, 988, 988, 989,
+ 989, 990, 990, 991, 992, 992, 993, 993, 994, 994, 995,
+ 995, 996, 996, 997, 997, 998, 998, 999, 999, 1000, 1000,
+ 1001, 1001, 1002, 1002, 1003, 1003, 1004, 1004, 1005, 1005, 1006,
+ 1006, 1007, 1007, 1008, 1008, 1009, 1009, 1010, 1010, 1011, 1011,
+ 1012, 1012, 1013, 1013, 1014, 1014, 1015, 1015, 1016, 1016, 1017,
+ 1017, 1018, 1018, 1019, 1019, 1020, 1020, 1021, 1021, 1022, 1022,
+ 1023,
+};
+
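+/*
+ * 10-bit gamma correction look-up tables, one per gamma value in 0.1
+ * steps; the suffix encodes gamma (xgamma10_06 is gamma = 0.6 and
+ * xgamma10_10 is gamma = 1.0, the identity mapping). Each table holds
+ * GAMMA10_TABLE_LENGTH (1024) entries and appears to follow
+ *
+ *   out = round(1023 * (in / 1023) ^ gamma),  in = 0..1023
+ *
+ * a formula inferred from the table values themselves, not taken from
+ * the hardware documentation. A minimal userspace sketch that
+ * regenerates one table under that assumption (build with -lm):
+ *
+ *   #include <math.h>
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *           double gamma = 0.6;     // e.g. xgamma10_06
+ *           int in;
+ *
+ *           // Print one LUT entry per line, mapping 0..1023 -> 0..1023
+ *           for (in = 0; in <= 1023; in++)
+ *                   printf("%ld,\n",
+ *                          lround(1023.0 * pow(in / 1023.0, gamma)));
+ *           return 0;
+ *   }
+ */
+
+/* gamma = 0.6 */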
+static const u16 xgamma10_06[GAMMA10_TABLE_LENGTH] = {
+ 0, 16, 24, 31, 37, 42, 47, 51, 56, 60, 64,
+ 67, 71, 75, 78, 81, 84, 88, 91, 94, 97, 99,
+ 102, 105, 108, 110, 113, 116, 118, 121, 123, 126, 128,
+ 130, 133, 135, 137, 140, 142, 144, 146, 148, 151, 153,
+ 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175,
+ 177, 179, 181, 183, 185, 187, 188, 190, 192, 194, 196,
+ 198, 199, 201, 203, 205, 206, 208, 210, 212, 213, 215,
+ 217, 218, 220, 222, 223, 225, 227, 228, 230, 232, 233,
+ 235, 236, 238, 240, 241, 243, 244, 246, 247, 249, 250,
+ 252, 253, 255, 257, 258, 260, 261, 263, 264, 265, 267,
+ 268, 270, 271, 273, 274, 276, 277, 279, 280, 281, 283,
+ 284, 286, 287, 288, 290, 291, 293, 294, 295, 297, 298,
+ 299, 301, 302, 303, 305, 306, 308, 309, 310, 312, 313,
+ 314, 315, 317, 318, 319, 321, 322, 323, 325, 326, 327,
+ 328, 330, 331, 332, 334, 335, 336, 337, 339, 340, 341,
+ 342, 344, 345, 346, 347, 349, 350, 351, 352, 353, 355,
+ 356, 357, 358, 359, 361, 362, 363, 364, 365, 367, 368,
+ 369, 370, 371, 373, 374, 375, 376, 377, 378, 380, 381,
+ 382, 383, 384, 385, 387, 388, 389, 390, 391, 392, 393,
+ 394, 396, 397, 398, 399, 400, 401, 402, 403, 405, 406,
+ 407, 408, 409, 410, 411, 412, 413, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 425, 426, 428, 429, 430,
+ 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441,
+ 442, 443, 445, 446, 447, 448, 449, 450, 451, 452, 453,
+ 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475,
+ 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497,
+ 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 507,
+ 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518,
+ 519, 520, 521, 522, 523, 524, 525, 525, 526, 527, 528,
+ 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 538,
+ 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 548,
+ 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 558,
+ 559, 560, 561, 562, 563, 564, 565, 566, 566, 567, 568,
+ 569, 570, 571, 572, 573, 574, 574, 575, 576, 577, 578,
+ 579, 580, 581, 581, 582, 583, 584, 585, 586, 587, 588,
+ 588, 589, 590, 591, 592, 593, 594, 594, 595, 596, 597,
+ 598, 599, 600, 601, 601, 602, 603, 604, 605, 606, 606,
+ 607, 608, 609, 610, 611, 612, 612, 613, 614, 615, 616,
+ 617, 617, 618, 619, 620, 621, 622, 622, 623, 624, 625,
+ 626, 627, 627, 628, 629, 630, 631, 632, 632, 633, 634,
+ 635, 636, 637, 637, 638, 639, 640, 641, 642, 642, 643,
+ 644, 645, 646, 646, 647, 648, 649, 650, 650, 651, 652,
+ 653, 654, 655, 655, 656, 657, 658, 659, 659, 660, 661,
+ 662, 663, 663, 664, 665, 666, 667, 667, 668, 669, 670,
+ 671, 671, 672, 673, 674, 675, 675, 676, 677, 678, 678,
+ 679, 680, 681, 682, 682, 683, 684, 685, 686, 686, 687,
+ 688, 689, 689, 690, 691, 692, 693, 693, 694, 695, 696,
+ 696, 697, 698, 699, 700, 700, 701, 702, 703, 703, 704,
+ 705, 706, 707, 707, 708, 709, 710, 710, 711, 712, 713,
+ 713, 714, 715, 716, 716, 717, 718, 719, 719, 720, 721,
+ 722, 723, 723, 724, 725, 726, 726, 727, 728, 729, 729,
+ 730, 731, 732, 732, 733, 734, 735, 735, 736, 737, 738,
+ 738, 739, 740, 741, 741, 742, 743, 743, 744, 745, 746,
+ 746, 747, 748, 749, 749, 750, 751, 752, 752, 753, 754,
+ 755, 755, 756, 757, 758, 758, 759, 760, 760, 761, 762,
+ 763, 763, 764, 765, 766, 766, 767, 768, 768, 769, 770,
+ 771, 771, 772, 773, 774, 774, 775, 776, 776, 777, 778,
+ 779, 779, 780, 781, 781, 782, 783, 784, 784, 785, 786,
+ 786, 787, 788, 789, 789, 790, 791, 791, 792, 793, 794,
+ 794, 795, 796, 796, 797, 798, 799, 799, 800, 801, 801,
+ 802, 803, 803, 804, 805, 806, 806, 807, 808, 808, 809,
+ 810, 811, 811, 812, 813, 813, 814, 815, 815, 816, 817,
+ 818, 818, 819, 820, 820, 821, 822, 822, 823, 824, 824,
+ 825, 826, 827, 827, 828, 829, 829, 830, 831, 831, 832,
+ 833, 833, 834, 835, 835, 836, 837, 838, 838, 839, 840,
+ 840, 841, 842, 842, 843, 844, 844, 845, 846, 846, 847,
+ 848, 848, 849, 850, 851, 851, 852, 853, 853, 854, 855,
+ 855, 856, 857, 857, 858, 859, 859, 860, 861, 861, 862,
+ 863, 863, 864, 865, 865, 866, 867, 867, 868, 869, 869,
+ 870, 871, 871, 872, 873, 873, 874, 875, 875, 876, 877,
+ 877, 878, 879, 879, 880, 881, 881, 882, 883, 883, 884,
+ 885, 885, 886, 887, 887, 888, 889, 889, 890, 891, 891,
+ 892, 893, 893, 894, 895, 895, 896, 897, 897, 898, 898,
+ 899, 900, 900, 901, 902, 902, 903, 904, 904, 905, 906,
+ 906, 907, 908, 908, 909, 910, 910, 911, 911, 912, 913,
+ 913, 914, 915, 915, 916, 917, 917, 918, 919, 919, 920,
+ 921, 921, 922, 922, 923, 924, 924, 925, 926, 926, 927,
+ 928, 928, 929, 930, 930, 931, 931, 932, 933, 933, 934,
+ 935, 935, 936, 937, 937, 938, 938, 939, 940, 940, 941,
+ 942, 942, 943, 944, 944, 945, 945, 946, 947, 947, 948,
+ 949, 949, 950, 950, 951, 952, 952, 953, 954, 954, 955,
+ 956, 956, 957, 957, 958, 959, 959, 960, 961, 961, 962,
+ 962, 963, 964, 964, 965, 966, 966, 967, 967, 968, 969,
+ 969, 970, 970, 971, 972, 972, 973, 974, 974, 975, 975,
+ 976, 977, 977, 978, 979, 979, 980, 980, 981, 982, 982,
+ 983, 983, 984, 985, 985, 986, 987, 987, 988, 988, 989,
+ 990, 990, 991, 991, 992, 993, 993, 994, 995, 995, 996,
+ 996, 997, 998, 998, 999, 999, 1000, 1001, 1001, 1002, 1002,
+ 1003, 1004, 1004, 1005, 1006, 1006, 1007, 1007, 1008, 1009, 1009,
+ 1010, 1010, 1011, 1012, 1012, 1013, 1013, 1014, 1015, 1015, 1016,
+ 1016, 1017, 1018, 1018, 1019, 1019, 1020, 1021, 1021, 1022, 1022,
+ 1023,
+};
+
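+/* gamma = 0.7 */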
+static const u16 xgamma10_07[GAMMA10_TABLE_LENGTH] = {
+ 0, 8, 13, 17, 21, 25, 28, 31, 34, 37, 40,
+ 43, 46, 48, 51, 53, 56, 58, 60, 63, 65, 67,
+ 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,
+ 92, 94, 96, 98, 100, 102, 104, 106, 108, 109, 111,
+ 113, 115, 117, 118, 120, 122, 124, 125, 127, 129, 131,
+ 132, 134, 136, 137, 139, 140, 142, 144, 145, 147, 149,
+ 150, 152, 153, 155, 157, 158, 160, 161, 163, 164, 166,
+ 167, 169, 170, 172, 173, 175, 176, 178, 179, 181, 182,
+ 184, 185, 187, 188, 190, 191, 192, 194, 195, 197, 198,
+ 199, 201, 202, 204, 205, 206, 208, 209, 211, 212, 213,
+ 215, 216, 217, 219, 220, 222, 223, 224, 226, 227, 228,
+ 230, 231, 232, 234, 235, 236, 237, 239, 240, 241, 243,
+ 244, 245, 247, 248, 249, 250, 252, 253, 254, 256, 257,
+ 258, 259, 261, 262, 263, 264, 266, 267, 268, 269, 271,
+ 272, 273, 274, 275, 277, 278, 279, 280, 282, 283, 284,
+ 285, 286, 288, 289, 290, 291, 292, 294, 295, 296, 297,
+ 298, 300, 301, 302, 303, 304, 306, 307, 308, 309, 310,
+ 311, 313, 314, 315, 316, 317, 318, 319, 321, 322, 323,
+ 324, 325, 326, 327, 329, 330, 331, 332, 333, 334, 335,
+ 337, 338, 339, 340, 341, 342, 343, 344, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 357, 358, 359, 360,
+ 361, 362, 363, 364, 365, 366, 368, 369, 370, 371, 372,
+ 373, 374, 375, 376, 377, 378, 379, 380, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
+ 396, 397, 398, 400, 401, 402, 403, 404, 405, 406, 407,
+ 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429,
+ 430, 431, 432, 433, 434, 435, 436, 437, 439, 440, 441,
+ 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452,
+ 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462,
+ 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 487, 488, 489, 490, 491, 492, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
+ 506, 507, 508, 509, 510, 511, 511, 512, 513, 514, 515,
+ 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526,
+ 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536,
+ 537, 538, 538, 539, 540, 541, 542, 543, 544, 545, 546,
+ 547, 548, 549, 549, 550, 551, 552, 553, 554, 555, 556,
+ 557, 558, 559, 560, 560, 561, 562, 563, 564, 565, 566,
+ 567, 568, 569, 569, 570, 571, 572, 573, 574, 575, 576,
+ 577, 578, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 586, 587, 588, 589, 590, 591, 592, 593, 594, 594, 595,
+ 596, 597, 598, 599, 600, 601, 601, 602, 603, 604, 605,
+ 606, 607, 608, 608, 609, 610, 611, 612, 613, 614, 615,
+ 615, 616, 617, 618, 619, 620, 621, 622, 622, 623, 624,
+ 625, 626, 627, 628, 628, 629, 630, 631, 632, 633, 634,
+ 634, 635, 636, 637, 638, 639, 640, 640, 641, 642, 643,
+ 644, 645, 646, 646, 647, 648, 649, 650, 651, 652, 652,
+ 653, 654, 655, 656, 657, 657, 658, 659, 660, 661, 662,
+ 663, 663, 664, 665, 666, 667, 668, 668, 669, 670, 671,
+ 672, 673, 673, 674, 675, 676, 677, 678, 678, 679, 680,
+ 681, 682, 683, 683, 684, 685, 686, 687, 688, 688, 689,
+ 690, 691, 692, 693, 693, 694, 695, 696, 697, 698, 698,
+ 699, 700, 701, 702, 703, 703, 704, 705, 706, 707, 707,
+ 708, 709, 710, 711, 712, 712, 713, 714, 715, 716, 716,
+ 717, 718, 719, 720, 721, 721, 722, 723, 724, 725, 725,
+ 726, 727, 728, 729, 729, 730, 731, 732, 733, 733, 734,
+ 735, 736, 737, 738, 738, 739, 740, 741, 742, 742, 743,
+ 744, 745, 746, 746, 747, 748, 749, 750, 750, 751, 752,
+ 753, 754, 754, 755, 756, 757, 758, 758, 759, 760, 761,
+ 761, 762, 763, 764, 765, 765, 766, 767, 768, 769, 769,
+ 770, 771, 772, 773, 773, 774, 775, 776, 777, 777, 778,
+ 779, 780, 780, 781, 782, 783, 784, 784, 785, 786, 787,
+ 788, 788, 789, 790, 791, 791, 792, 793, 794, 795, 795,
+ 796, 797, 798, 798, 799, 800, 801, 802, 802, 803, 804,
+ 805, 805, 806, 807, 808, 809, 809, 810, 811, 812, 812,
+ 813, 814, 815, 816, 816, 817, 818, 819, 819, 820, 821,
+ 822, 822, 823, 824, 825, 826, 826, 827, 828, 829, 829,
+ 830, 831, 832, 832, 833, 834, 835, 835, 836, 837, 838,
+ 839, 839, 840, 841, 842, 842, 843, 844, 845, 845, 846,
+ 847, 848, 848, 849, 850, 851, 851, 852, 853, 854, 854,
+ 855, 856, 857, 857, 858, 859, 860, 860, 861, 862, 863,
+ 864, 864, 865, 866, 867, 867, 868, 869, 870, 870, 871,
+ 872, 873, 873, 874, 875, 876, 876, 877, 878, 879, 879,
+ 880, 881, 881, 882, 883, 884, 884, 885, 886, 887, 887,
+ 888, 889, 890, 890, 891, 892, 893, 893, 894, 895, 896,
+ 896, 897, 898, 899, 899, 900, 901, 902, 902, 903, 904,
+ 904, 905, 906, 907, 907, 908, 909, 910, 910, 911, 912,
+ 913, 913, 914, 915, 916, 916, 917, 918, 918, 919, 920,
+ 921, 921, 922, 923, 924, 924, 925, 926, 927, 927, 928,
+ 929, 929, 930, 931, 932, 932, 933, 934, 935, 935, 936,
+ 937, 937, 938, 939, 940, 940, 941, 942, 943, 943, 944,
+ 945, 945, 946, 947, 948, 948, 949, 950, 950, 951, 952,
+ 953, 953, 954, 955, 956, 956, 957, 958, 958, 959, 960,
+ 961, 961, 962, 963, 963, 964, 965, 966, 966, 967, 968,
+ 968, 969, 970, 971, 971, 972, 973, 973, 974, 975, 976,
+ 976, 977, 978, 978, 979, 980, 981, 981, 982, 983, 983,
+ 984, 985, 986, 986, 987, 988, 988, 989, 990, 991, 991,
+ 992, 993, 993, 994, 995, 996, 996, 997, 998, 998, 999,
+ 1000, 1000, 1001, 1002, 1003, 1003, 1004, 1005, 1005, 1006, 1007,
+ 1008, 1008, 1009, 1010, 1010, 1011, 1012, 1012, 1013, 1014, 1015,
+ 1015, 1016, 1017, 1017, 1018, 1019, 1019, 1020, 1021, 1022, 1022,
+ 1023,
+};
+
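+/* gamma = 0.8 */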
+static const u16 xgamma10_08[GAMMA10_TABLE_LENGTH] = {
+ 0, 4, 7, 10, 12, 14, 17, 19, 21, 23, 25,
+ 27, 29, 31, 33, 35, 37, 39, 40, 42, 44, 46,
+ 47, 49, 51, 53, 54, 56, 58, 59, 61, 62, 64,
+ 66, 67, 69, 70, 72, 73, 75, 76, 78, 80, 81,
+ 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97,
+ 99, 100, 102, 103, 104, 106, 107, 109, 110, 111, 113,
+ 114, 116, 117, 118, 120, 121, 122, 124, 125, 126, 128,
+ 129, 131, 132, 133, 135, 136, 137, 138, 140, 141, 142,
+ 144, 145, 146, 148, 149, 150, 152, 153, 154, 155, 157,
+ 158, 159, 160, 162, 163, 164, 166, 167, 168, 169, 171,
+ 172, 173, 174, 176, 177, 178, 179, 181, 182, 183, 184,
+ 185, 187, 188, 189, 190, 192, 193, 194, 195, 196, 198,
+ 199, 200, 201, 202, 204, 205, 206, 207, 208, 210, 211,
+ 212, 213, 214, 216, 217, 218, 219, 220, 221, 223, 224,
+ 225, 226, 227, 228, 230, 231, 232, 233, 234, 235, 237,
+ 238, 239, 240, 241, 242, 243, 245, 246, 247, 248, 249,
+ 250, 251, 253, 254, 255, 256, 257, 258, 259, 260, 262,
+ 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345,
+ 346, 347, 348, 349, 350, 351, 352, 353, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368,
+ 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
+ 380, 381, 382, 383, 384, 385, 386, 388, 389, 390, 391,
+ 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435,
+ 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446,
+ 447, 448, 449, 450, 451, 451, 452, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 486, 487, 488,
+ 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
+ 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 509,
+ 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520,
+ 521, 522, 523, 524, 525, 526, 527, 527, 528, 529, 530,
+ 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+ 542, 543, 543, 544, 545, 546, 547, 548, 549, 550, 551,
+ 552, 553, 554, 555, 556, 557, 557, 558, 559, 560, 561,
+ 562, 563, 564, 565, 566, 567, 568, 569, 570, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592,
+ 593, 594, 594, 595, 596, 597, 598, 599, 600, 601, 602,
+ 603, 604, 604, 605, 606, 607, 608, 609, 610, 611, 612,
+ 613, 614, 615, 615, 616, 617, 618, 619, 620, 621, 622,
+ 623, 624, 624, 625, 626, 627, 628, 629, 630, 631, 632,
+ 633, 634, 634, 635, 636, 637, 638, 639, 640, 641, 642,
+ 643, 643, 644, 645, 646, 647, 648, 649, 650, 651, 651,
+ 652, 653, 654, 655, 656, 657, 658, 659, 660, 660, 661,
+ 662, 663, 664, 665, 666, 667, 668, 668, 669, 670, 671,
+ 672, 673, 674, 675, 676, 676, 677, 678, 679, 680, 681,
+ 682, 683, 684, 684, 685, 686, 687, 688, 689, 690, 691,
+ 691, 692, 693, 694, 695, 696, 697, 698, 699, 699, 700,
+ 701, 702, 703, 704, 705, 706, 706, 707, 708, 709, 710,
+ 711, 712, 713, 713, 714, 715, 716, 717, 718, 719, 720,
+ 720, 721, 722, 723, 724, 725, 726, 727, 727, 728, 729,
+ 730, 731, 732, 733, 734, 734, 735, 736, 737, 738, 739,
+ 740, 740, 741, 742, 743, 744, 745, 746, 747, 747, 748,
+ 749, 750, 751, 752, 753, 753, 754, 755, 756, 757, 758,
+ 759, 759, 760, 761, 762, 763, 764, 765, 766, 766, 767,
+ 768, 769, 770, 771, 772, 772, 773, 774, 775, 776, 777,
+ 778, 778, 779, 780, 781, 782, 783, 784, 784, 785, 786,
+ 787, 788, 789, 790, 790, 791, 792, 793, 794, 795, 795,
+ 796, 797, 798, 799, 800, 801, 801, 802, 803, 804, 805,
+ 806, 807, 807, 808, 809, 810, 811, 812, 812, 813, 814,
+ 815, 816, 817, 818, 818, 819, 820, 821, 822, 823, 823,
+ 824, 825, 826, 827, 828, 829, 829, 830, 831, 832, 833,
+ 834, 834, 835, 836, 837, 838, 839, 839, 840, 841, 842,
+ 843, 844, 845, 845, 846, 847, 848, 849, 850, 850, 851,
+ 852, 853, 854, 855, 855, 856, 857, 858, 859, 860, 860,
+ 861, 862, 863, 864, 865, 865, 866, 867, 868, 869, 870,
+ 870, 871, 872, 873, 874, 875, 875, 876, 877, 878, 879,
+ 880, 880, 881, 882, 883, 884, 885, 885, 886, 887, 888,
+ 889, 890, 890, 891, 892, 893, 894, 895, 895, 896, 897,
+ 898, 899, 899, 900, 901, 902, 903, 904, 904, 905, 906,
+ 907, 908, 909, 909, 910, 911, 912, 913, 913, 914, 915,
+ 916, 917, 918, 918, 919, 920, 921, 922, 923, 923, 924,
+ 925, 926, 927, 927, 928, 929, 930, 931, 932, 932, 933,
+ 934, 935, 936, 936, 937, 938, 939, 940, 941, 941, 942,
+ 943, 944, 945, 945, 946, 947, 948, 949, 950, 950, 951,
+ 952, 953, 954, 954, 955, 956, 957, 958, 958, 959, 960,
+ 961, 962, 963, 963, 964, 965, 966, 967, 967, 968, 969,
+ 970, 971, 971, 972, 973, 974, 975, 976, 976, 977, 978,
+ 979, 980, 980, 981, 982, 983, 984, 984, 985, 986, 987,
+ 988, 988, 989, 990, 991, 992, 992, 993, 994, 995, 996,
+ 997, 997, 998, 999, 1000, 1001, 1001, 1002, 1003, 1004, 1005,
+ 1005, 1006, 1007, 1008, 1009, 1009, 1010, 1011, 1012, 1013, 1013,
+ 1014, 1015, 1016, 1017, 1017, 1018, 1019, 1020, 1021, 1021, 1022,
+ 1023,
+};
+
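+/* gamma = 0.9 */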
+static const u16 xgamma10_09[GAMMA10_TABLE_LENGTH] = {
+ 0, 2, 4, 5, 7, 9, 10, 12, 13, 14, 16,
+ 17, 19, 20, 22, 23, 24, 26, 27, 28, 30, 31,
+ 32, 34, 35, 36, 38, 39, 40, 41, 43, 44, 45,
+ 47, 48, 49, 50, 52, 53, 54, 55, 57, 58, 59,
+ 60, 62, 63, 64, 65, 66, 68, 69, 70, 71, 72,
+ 74, 75, 76, 77, 78, 80, 81, 82, 83, 84, 86,
+ 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 99,
+ 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111,
+ 112, 114, 115, 116, 117, 118, 119, 120, 122, 123, 124,
+ 125, 126, 127, 128, 130, 131, 132, 133, 134, 135, 136,
+ 137, 139, 140, 141, 142, 143, 144, 145, 146, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
+ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 367, 368, 369,
+ 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380,
+ 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
+ 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 427, 428, 429, 430, 431, 432, 433, 434,
+ 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445,
+ 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
+ 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
+ 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510,
+ 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521,
+ 522, 523, 524, 525, 525, 526, 527, 528, 529, 530, 531,
+ 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542,
+ 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553,
+ 554, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
+ 596, 597, 598, 599, 600, 601, 601, 602, 603, 604, 605,
+ 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616,
+ 617, 618, 619, 620, 621, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
+ 638, 639, 640, 640, 641, 642, 643, 644, 645, 646, 647,
+ 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658,
+ 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668,
+ 669, 670, 671, 672, 673, 674, 675, 675, 676, 677, 678,
+ 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
+ 690, 691, 691, 692, 693, 694, 695, 696, 697, 698, 699,
+ 700, 701, 702, 703, 704, 705, 706, 706, 707, 708, 709,
+ 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720,
+ 721, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730,
+ 731, 732, 733, 734, 735, 735, 736, 737, 738, 739, 740,
+ 741, 742, 743, 744, 745, 746, 747, 748, 749, 749, 750,
+ 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761,
+ 762, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771,
+ 772, 773, 774, 775, 776, 776, 777, 778, 779, 780, 781,
+ 782, 783, 784, 785, 786, 787, 788, 788, 789, 790, 791,
+ 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 801,
+ 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812,
+ 813, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822,
+ 823, 824, 825, 825, 826, 827, 828, 829, 830, 831, 832,
+ 833, 834, 835, 836, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 870, 871, 872,
+ 873, 874, 875, 876, 877, 878, 879, 880, 881, 881, 882,
+ 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 892,
+ 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 902,
+ 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913,
+ 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
+ 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933,
+ 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943,
+ 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953,
+ 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 962,
+ 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 972,
+ 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 982,
+ 983, 984, 985, 986, 987, 988, 989, 990, 991, 991, 992,
+ 993, 994, 995, 996, 997, 998, 999, 1000, 1000, 1001, 1002,
+ 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1009, 1010, 1011, 1012,
+ 1013, 1014, 1015, 1016, 1017, 1018, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
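+/* gamma = 1.0 (identity: out == in) */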
+static const u16 xgamma10_10[GAMMA10_TABLE_LENGTH] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340,
+ 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351,
+ 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
+ 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373,
+ 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
+ 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406,
+ 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439,
+ 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461,
+ 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472,
+ 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
+ 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
+ 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+ 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538,
+ 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549,
+ 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560,
+ 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593,
+ 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615,
+ 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
+ 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648,
+ 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659,
+ 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670,
+ 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681,
+ 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692,
+ 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703,
+ 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714,
+ 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725,
+ 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736,
+ 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758,
+ 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769,
+ 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780,
+ 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791,
+ 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802,
+ 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813,
+ 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824,
+ 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835,
+ 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
+ 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868,
+ 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879,
+ 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901,
+ 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912,
+ 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
+ 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934,
+ 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945,
+ 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956,
+ 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967,
+ 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978,
+ 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989,
+ 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000,
+ 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
+ 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
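+/* gamma = 1.1 */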
+static const u16 xgamma10_11[GAMMA10_TABLE_LENGTH] = {
+ 0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6,
+ 7, 8, 8, 9, 10, 11, 11, 12, 13, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20, 21, 22, 23,
+ 23, 24, 25, 26, 27, 27, 28, 29, 30, 31, 31,
+ 32, 33, 34, 35, 35, 36, 37, 38, 39, 39, 40,
+ 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 49,
+ 50, 51, 52, 53, 54, 54, 55, 56, 57, 58, 59,
+ 59, 60, 61, 62, 63, 64, 65, 65, 66, 67, 68,
+ 69, 70, 71, 71, 72, 73, 74, 75, 76, 77, 78,
+ 78, 79, 80, 81, 82, 83, 84, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
+ 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370,
+ 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381,
+ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
+ 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403,
+ 404, 405, 406, 407, 408, 409, 410, 411, 412, 414, 415,
+ 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426,
+ 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
+ 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448,
+ 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
+ 460, 461, 462, 463, 464, 465, 466, 468, 469, 470, 471,
+ 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482,
+ 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493,
+ 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 505,
+ 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+ 528, 529, 530, 531, 532, 533, 535, 536, 537, 538, 539,
+ 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550,
+ 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 562,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596,
+ 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 608,
+ 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619,
+ 620, 621, 622, 623, 624, 625, 626, 627, 629, 630, 631,
+ 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642,
+ 643, 644, 645, 646, 648, 649, 650, 651, 652, 653, 654,
+ 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665,
+ 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689,
+ 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712,
+ 713, 714, 715, 716, 717, 719, 720, 721, 722, 723, 724,
+ 725, 726, 727, 728, 729, 730, 731, 732, 733, 735, 736,
+ 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ 748, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759,
+ 760, 761, 762, 763, 764, 766, 767, 768, 769, 770, 771,
+ 772, 773, 774, 775, 776, 777, 778, 779, 781, 782, 783,
+ 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 795,
+ 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806,
+ 807, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818,
+ 819, 820, 821, 823, 824, 825, 826, 827, 828, 829, 830,
+ 831, 832, 833, 834, 835, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 850, 851, 852, 853, 854,
+ 855, 856, 857, 858, 859, 860, 861, 863, 864, 865, 866,
+ 867, 868, 869, 870, 871, 872, 873, 874, 876, 877, 878,
+ 879, 880, 881, 882, 883, 884, 885, 886, 887, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 902,
+ 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 914,
+ 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 926,
+ 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 938,
+ 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 950,
+ 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 962,
+ 963, 964, 965, 966, 967, 968, 969, 970, 971, 973, 974,
+ 975, 976, 977, 978, 979, 980, 981, 982, 983, 985, 986,
+ 987, 988, 989, 990, 991, 992, 993, 994, 996, 997, 998,
+ 999, 1000, 1001, 1002, 1003, 1004, 1005, 1007, 1008, 1009, 1010,
+ 1011, 1012, 1013, 1014, 1015, 1016, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
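+/* gamma = 1.2 */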
+static const u16 xgamma10_12[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 14, 14, 15, 15, 16,
+ 17, 17, 18, 18, 19, 20, 20, 21, 22, 22, 23,
+ 23, 24, 25, 25, 26, 27, 27, 28, 29, 29, 30,
+ 31, 31, 32, 33, 33, 34, 35, 35, 36, 37, 37,
+ 38, 39, 40, 40, 41, 42, 42, 43, 44, 44, 45,
+ 46, 47, 47, 48, 49, 49, 50, 51, 52, 52, 53,
+ 54, 55, 55, 56, 57, 58, 58, 59, 60, 61, 61,
+ 62, 63, 64, 64, 65, 66, 67, 67, 68, 69, 70,
+ 70, 71, 72, 73, 74, 74, 75, 76, 77, 77, 78,
+ 79, 80, 81, 81, 82, 83, 84, 84, 85, 86, 87,
+ 88, 88, 89, 90, 91, 92, 92, 93, 94, 95, 96,
+ 96, 97, 98, 99, 100, 101, 101, 102, 103, 104, 105,
+ 105, 106, 107, 108, 109, 110, 110, 111, 112, 113, 114,
+ 115, 115, 116, 117, 118, 119, 120, 120, 121, 122, 123,
+ 124, 125, 125, 126, 127, 128, 129, 130, 131, 131, 132,
+ 133, 134, 135, 136, 137, 137, 138, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 147, 148, 149, 150, 150, 151,
+ 152, 153, 154, 155, 156, 157, 157, 158, 159, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 334, 335, 336, 337,
+ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 353, 354, 355, 356, 357, 358, 359, 360,
+ 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371,
+ 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382,
+ 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393,
+ 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 411, 412, 413, 414, 415, 416,
+ 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427,
+ 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 439,
+ 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 462,
+ 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 481, 482, 483, 484, 485,
+ 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496,
+ 497, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 515, 516, 517, 518, 519, 520,
+ 521, 522, 523, 524, 525, 526, 527, 529, 530, 531, 532,
+ 533, 534, 535, 536, 537, 538, 539, 540, 541, 543, 544,
+ 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
+ 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567,
+ 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
+ 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591,
+ 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 615, 616,
+ 617, 618, 619, 620, 621, 622, 623, 624, 626, 627, 628,
+ 629, 630, 631, 632, 633, 634, 636, 637, 638, 639, 640,
+ 641, 642, 643, 644, 646, 647, 648, 649, 650, 651, 652,
+ 653, 654, 656, 657, 658, 659, 660, 661, 662, 663, 664,
+ 666, 667, 668, 669, 670, 671, 672, 673, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689,
+ 690, 691, 692, 694, 695, 696, 697, 698, 699, 700, 701,
+ 703, 704, 705, 706, 707, 708, 709, 710, 712, 713, 714,
+ 715, 716, 717, 718, 720, 721, 722, 723, 724, 725, 726,
+ 727, 729, 730, 731, 732, 733, 734, 735, 737, 738, 739,
+ 740, 741, 742, 743, 745, 746, 747, 748, 749, 750, 751,
+ 752, 754, 755, 756, 757, 758, 759, 760, 762, 763, 764,
+ 765, 766, 767, 768, 770, 771, 772, 773, 774, 775, 776,
+ 778, 779, 780, 781, 782, 783, 785, 786, 787, 788, 789,
+ 790, 791, 793, 794, 795, 796, 797, 798, 799, 801, 802,
+ 803, 804, 805, 806, 808, 809, 810, 811, 812, 813, 814,
+ 816, 817, 818, 819, 820, 821, 823, 824, 825, 826, 827,
+ 828, 830, 831, 832, 833, 834, 835, 836, 838, 839, 840,
+ 841, 842, 843, 845, 846, 847, 848, 849, 850, 852, 853,
+ 854, 855, 856, 857, 859, 860, 861, 862, 863, 864, 866,
+ 867, 868, 869, 870, 871, 873, 874, 875, 876, 877, 878,
+ 880, 881, 882, 883, 884, 885, 887, 888, 889, 890, 891,
+ 892, 894, 895, 896, 897, 898, 900, 901, 902, 903, 904,
+ 905, 907, 908, 909, 910, 911, 912, 914, 915, 916, 917,
+ 918, 920, 921, 922, 923, 924, 925, 927, 928, 929, 930,
+ 931, 932, 934, 935, 936, 937, 938, 940, 941, 942, 943,
+ 944, 946, 947, 948, 949, 950, 951, 953, 954, 955, 956,
+ 957, 959, 960, 961, 962, 963, 964, 966, 967, 968, 969,
+ 970, 972, 973, 974, 975, 976, 978, 979, 980, 981, 982,
+ 984, 985, 986, 987, 988, 989, 991, 992, 993, 994, 995,
+ 997, 998, 999, 1000, 1001, 1003, 1004, 1005, 1006, 1007, 1009,
+ 1010, 1011, 1012, 1013, 1015, 1016, 1017, 1018, 1019, 1021, 1022,
+ 1023,
+};
+
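+/* gamma = 1.3 */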
+static const u16 xgamma10_13[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 26, 27, 27, 28, 28,
+ 29, 30, 30, 31, 31, 32, 32, 33, 34, 34, 35,
+ 35, 36, 37, 37, 38, 38, 39, 40, 40, 41, 42,
+ 42, 43, 43, 44, 45, 45, 46, 47, 47, 48, 48,
+ 49, 50, 50, 51, 52, 52, 53, 54, 54, 55, 56,
+ 56, 57, 58, 58, 59, 60, 60, 61, 62, 62, 63,
+ 64, 64, 65, 66, 67, 67, 68, 69, 69, 70, 71,
+ 71, 72, 73, 74, 74, 75, 76, 76, 77, 78, 79,
+ 79, 80, 81, 81, 82, 83, 84, 84, 85, 86, 87,
+ 87, 88, 89, 89, 90, 91, 92, 92, 93, 94, 95,
+ 95, 96, 97, 98, 98, 99, 100, 101, 102, 102, 103,
+ 104, 105, 105, 106, 107, 108, 108, 109, 110, 111, 112,
+ 112, 113, 114, 115, 115, 116, 117, 118, 119, 119, 120,
+ 121, 122, 123, 123, 124, 125, 126, 127, 127, 128, 129,
+ 130, 131, 131, 132, 133, 134, 135, 135, 136, 137, 138,
+ 139, 140, 140, 141, 142, 143, 144, 145, 145, 146, 147,
+ 148, 149, 149, 150, 151, 152, 153, 154, 155, 155, 156,
+ 157, 158, 159, 160, 160, 161, 162, 163, 164, 165, 166,
+ 166, 167, 168, 169, 170, 171, 172, 172, 173, 174, 175,
+ 176, 177, 178, 178, 179, 180, 181, 182, 183, 184, 185,
+ 185, 186, 187, 188, 189, 190, 191, 192, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341,
+ 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363,
+ 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 386,
+ 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397,
+ 398, 399, 400, 401, 402, 403, 404, 405, 407, 408, 409,
+ 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420,
+ 421, 422, 423, 424, 426, 427, 428, 429, 430, 431, 432,
+ 433, 434, 435, 436, 437, 438, 439, 440, 442, 443, 444,
+ 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479,
+ 480, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491,
+ 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 504,
+ 505, 506, 507, 508, 509, 510, 511, 512, 513, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 525, 526, 527, 528,
+ 529, 530, 531, 532, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 544, 545, 546, 547, 548, 549, 550, 551, 553,
+ 554, 555, 556, 557, 558, 559, 561, 562, 563, 564, 565,
+ 566, 567, 568, 570, 571, 572, 573, 574, 575, 576, 578,
+ 579, 580, 581, 582, 583, 584, 586, 587, 588, 589, 590,
+ 591, 592, 594, 595, 596, 597, 598, 599, 600, 602, 603,
+ 604, 605, 606, 607, 608, 610, 611, 612, 613, 614, 615,
+ 617, 618, 619, 620, 621, 622, 624, 625, 626, 627, 628,
+ 629, 630, 632, 633, 634, 635, 636, 637, 639, 640, 641,
+ 642, 643, 644, 646, 647, 648, 649, 650, 652, 653, 654,
+ 655, 656, 657, 659, 660, 661, 662, 663, 664, 666, 667,
+ 668, 669, 670, 671, 673, 674, 675, 676, 677, 679, 680,
+ 681, 682, 683, 684, 686, 687, 688, 689, 690, 692, 693,
+ 694, 695, 696, 698, 699, 700, 701, 702, 704, 705, 706,
+ 707, 708, 709, 711, 712, 713, 714, 715, 717, 718, 719,
+ 720, 721, 723, 724, 725, 726, 727, 729, 730, 731, 732,
+ 733, 735, 736, 737, 738, 739, 741, 742, 743, 744, 746,
+ 747, 748, 749, 750, 752, 753, 754, 755, 756, 758, 759,
+ 760, 761, 762, 764, 765, 766, 767, 769, 770, 771, 772,
+ 773, 775, 776, 777, 778, 780, 781, 782, 783, 784, 786,
+ 787, 788, 789, 791, 792, 793, 794, 795, 797, 798, 799,
+ 800, 802, 803, 804, 805, 807, 808, 809, 810, 811, 813,
+ 814, 815, 816, 818, 819, 820, 821, 823, 824, 825, 826,
+ 827, 829, 830, 831, 832, 834, 835, 836, 837, 839, 840,
+ 841, 842, 844, 845, 846, 847, 849, 850, 851, 852, 854,
+ 855, 856, 857, 859, 860, 861, 862, 864, 865, 866, 867,
+ 869, 870, 871, 872, 874, 875, 876, 877, 879, 880, 881,
+ 882, 884, 885, 886, 887, 889, 890, 891, 892, 894, 895,
+ 896, 897, 899, 900, 901, 903, 904, 905, 906, 908, 909,
+ 910, 911, 913, 914, 915, 916, 918, 919, 920, 922, 923,
+ 924, 925, 927, 928, 929, 930, 932, 933, 934, 935, 937,
+ 938, 939, 941, 942, 943, 944, 946, 947, 948, 950, 951,
+ 952, 953, 955, 956, 957, 958, 960, 961, 962, 964, 965,
+ 966, 967, 969, 970, 971, 973, 974, 975, 976, 978, 979,
+ 980, 982, 983, 984, 985, 987, 988, 989, 991, 992, 993,
+ 994, 996, 997, 998, 1000, 1001, 1002, 1004, 1005, 1006, 1007,
+ 1009, 1010, 1011, 1013, 1014, 1015, 1017, 1018, 1019, 1020, 1022,
+ 1023,
+};
+
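+/* gamma = 1.4 */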
+static const u16 xgamma10_14[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12,
+ 12, 13, 13, 14, 14, 15, 15, 15, 16, 16, 17,
+ 17, 18, 18, 18, 19, 19, 20, 20, 21, 21, 22,
+ 22, 23, 23, 23, 24, 24, 25, 25, 26, 26, 27,
+ 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32,
+ 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
+ 39, 39, 40, 41, 41, 42, 42, 43, 43, 44, 45,
+ 45, 46, 46, 47, 47, 48, 49, 49, 50, 50, 51,
+ 52, 52, 53, 53, 54, 55, 55, 56, 56, 57, 58,
+ 58, 59, 59, 60, 61, 61, 62, 63, 63, 64, 64,
+ 65, 66, 66, 67, 68, 68, 69, 70, 70, 71, 72,
+ 72, 73, 74, 74, 75, 76, 76, 77, 78, 78, 79,
+ 80, 80, 81, 82, 82, 83, 84, 84, 85, 86, 86,
+ 87, 88, 88, 89, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 98, 98, 99, 100, 100, 101, 102,
+ 103, 103, 104, 105, 106, 106, 107, 108, 109, 109, 110,
+ 111, 111, 112, 113, 114, 114, 115, 116, 117, 117, 118,
+ 119, 120, 120, 121, 122, 123, 124, 124, 125, 126, 127,
+ 127, 128, 129, 130, 130, 131, 132, 133, 134, 134, 135,
+ 136, 137, 138, 138, 139, 140, 141, 141, 142, 143, 144,
+ 145, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153,
+ 154, 154, 155, 156, 157, 158, 158, 159, 160, 161, 162,
+ 163, 163, 164, 165, 166, 167, 168, 168, 169, 170, 171,
+ 172, 173, 173, 174, 175, 176, 177, 178, 179, 179, 180,
+ 181, 182, 183, 184, 185, 185, 186, 187, 188, 189, 190,
+ 191, 191, 192, 193, 194, 195, 196, 197, 198, 198, 199,
+ 200, 201, 202, 203, 204, 205, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
+ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 351, 352, 353, 354, 355, 356, 357, 358,
+ 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369,
+ 370, 371, 372, 373, 374, 375, 377, 378, 379, 380, 381,
+ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
+ 393, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 410, 411, 412, 413, 414, 415, 416,
+ 417, 418, 419, 420, 421, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 435, 436, 437, 438, 439, 440,
+ 441, 442, 443, 444, 446, 447, 448, 449, 450, 451, 452,
+ 453, 454, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 466, 467, 468, 469, 470, 471, 472, 473, 475, 476, 477,
+ 478, 479, 480, 481, 482, 484, 485, 486, 487, 488, 489,
+ 490, 491, 493, 494, 495, 496, 497, 498, 499, 501, 502,
+ 503, 504, 505, 506, 507, 509, 510, 511, 512, 513, 514,
+ 515, 517, 518, 519, 520, 521, 522, 524, 525, 526, 527,
+ 528, 529, 531, 532, 533, 534, 535, 536, 537, 539, 540,
+ 541, 542, 543, 544, 546, 547, 548, 549, 550, 552, 553,
+ 554, 555, 556, 557, 559, 560, 561, 562, 563, 564, 566,
+ 567, 568, 569, 570, 572, 573, 574, 575, 576, 578, 579,
+ 580, 581, 582, 583, 585, 586, 587, 588, 589, 591, 592,
+ 593, 594, 595, 597, 598, 599, 600, 601, 603, 604, 605,
+ 606, 607, 609, 610, 611, 612, 613, 615, 616, 617, 618,
+ 620, 621, 622, 623, 624, 626, 627, 628, 629, 630, 632,
+ 633, 634, 635, 637, 638, 639, 640, 641, 643, 644, 645,
+ 646, 648, 649, 650, 651, 653, 654, 655, 656, 657, 659,
+ 660, 661, 662, 664, 665, 666, 667, 669, 670, 671, 672,
+ 674, 675, 676, 677, 679, 680, 681, 682, 684, 685, 686,
+ 687, 689, 690, 691, 692, 694, 695, 696, 697, 699, 700,
+ 701, 702, 704, 705, 706, 707, 709, 710, 711, 712, 714,
+ 715, 716, 717, 719, 720, 721, 723, 724, 725, 726, 728,
+ 729, 730, 731, 733, 734, 735, 737, 738, 739, 740, 742,
+ 743, 744, 745, 747, 748, 749, 751, 752, 753, 754, 756,
+ 757, 758, 760, 761, 762, 763, 765, 766, 767, 769, 770,
+ 771, 772, 774, 775, 776, 778, 779, 780, 782, 783, 784,
+ 785, 787, 788, 789, 791, 792, 793, 794, 796, 797, 798,
+ 800, 801, 802, 804, 805, 806, 808, 809, 810, 811, 813,
+ 814, 815, 817, 818, 819, 821, 822, 823, 825, 826, 827,
+ 829, 830, 831, 833, 834, 835, 836, 838, 839, 840, 842,
+ 843, 844, 846, 847, 848, 850, 851, 852, 854, 855, 856,
+ 858, 859, 860, 862, 863, 864, 866, 867, 868, 870, 871,
+ 872, 874, 875, 876, 878, 879, 880, 882, 883, 884, 886,
+ 887, 888, 890, 891, 893, 894, 895, 897, 898, 899, 901,
+ 902, 903, 905, 906, 907, 909, 910, 911, 913, 914, 915,
+ 917, 918, 920, 921, 922, 924, 925, 926, 928, 929, 930,
+ 932, 933, 935, 936, 937, 939, 940, 941, 943, 944, 945,
+ 947, 948, 950, 951, 952, 954, 955, 956, 958, 959, 961,
+ 962, 963, 965, 966, 967, 969, 970, 972, 973, 974, 976,
+ 977, 978, 980, 981, 983, 984, 985, 987, 988, 990, 991,
+ 992, 994, 995, 996, 998, 999, 1001, 1002, 1003, 1005, 1006,
+ 1008, 1009, 1010, 1012, 1013, 1015, 1016, 1017, 1019, 1020, 1022,
+ 1023,
+};
+
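+/* gamma = 1.5 */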
+static const u16 xgamma10_15[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 15, 16, 16, 16,
+ 17, 17, 18, 18, 18, 19, 19, 20, 20, 20, 21,
+ 21, 22, 22, 22, 23, 23, 24, 24, 25, 25, 25,
+ 26, 26, 27, 27, 28, 28, 28, 29, 29, 30, 30,
+ 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36,
+ 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47,
+ 47, 48, 48, 49, 50, 50, 51, 51, 52, 52, 53,
+ 53, 54, 55, 55, 56, 56, 57, 57, 58, 59, 59,
+ 60, 60, 61, 62, 62, 63, 63, 64, 64, 65, 66,
+ 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72,
+ 73, 74, 74, 75, 76, 76, 77, 77, 78, 79, 79,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 88, 88, 89, 90, 90, 91, 92, 92, 93, 94,
+ 94, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 106, 106, 107, 108, 108, 109,
+ 110, 110, 111, 112, 113, 113, 114, 115, 116, 116, 117,
+ 118, 118, 119, 120, 121, 121, 122, 123, 124, 124, 125,
+ 126, 127, 127, 128, 129, 130, 130, 131, 132, 133, 133,
+ 134, 135, 136, 136, 137, 138, 139, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 146, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 158, 158, 159,
+ 160, 161, 162, 162, 163, 164, 165, 166, 167, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 176, 177,
+ 178, 179, 180, 181, 181, 182, 183, 184, 185, 186, 187,
+ 187, 188, 189, 190, 191, 192, 193, 193, 194, 195, 196,
+ 197, 198, 199, 199, 200, 201, 202, 203, 204, 205, 206,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343,
+ 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355,
+ 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366,
+ 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378,
+ 379, 380, 381, 383, 384, 385, 386, 387, 388, 389, 390,
+ 391, 392, 393, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 407, 408, 409, 410, 411, 412, 413, 414,
+ 415, 417, 418, 419, 420, 421, 422, 423, 424, 425, 427,
+ 428, 429, 430, 431, 432, 433, 434, 436, 437, 438, 439,
+ 440, 441, 442, 444, 445, 446, 447, 448, 449, 450, 451,
+ 453, 454, 455, 456, 457, 458, 460, 461, 462, 463, 464,
+ 465, 466, 468, 469, 470, 471, 472, 473, 475, 476, 477,
+ 478, 479, 480, 482, 483, 484, 485, 486, 487, 489, 490,
+ 491, 492, 493, 494, 496, 497, 498, 499, 500, 501, 503,
+ 504, 505, 506, 507, 509, 510, 511, 512, 513, 515, 516,
+ 517, 518, 519, 521, 522, 523, 524, 525, 527, 528, 529,
+ 530, 531, 533, 534, 535, 536, 537, 539, 540, 541, 542,
+ 543, 545, 546, 547, 548, 550, 551, 552, 553, 554, 556,
+ 557, 558, 559, 561, 562, 563, 564, 565, 567, 568, 569,
+ 570, 572, 573, 574, 575, 577, 578, 579, 580, 582, 583,
+ 584, 585, 587, 588, 589, 590, 591, 593, 594, 595, 596,
+ 598, 599, 600, 602, 603, 604, 605, 607, 608, 609, 610,
+ 612, 613, 614, 615, 617, 618, 619, 620, 622, 623, 624,
+ 626, 627, 628, 629, 631, 632, 633, 634, 636, 637, 638,
+ 640, 641, 642, 643, 645, 646, 647, 649, 650, 651, 652,
+ 654, 655, 656, 658, 659, 660, 662, 663, 664, 665, 667,
+ 668, 669, 671, 672, 673, 675, 676, 677, 678, 680, 681,
+ 682, 684, 685, 686, 688, 689, 690, 692, 693, 694, 696,
+ 697, 698, 700, 701, 702, 703, 705, 706, 707, 709, 710,
+ 711, 713, 714, 715, 717, 718, 719, 721, 722, 723, 725,
+ 726, 727, 729, 730, 731, 733, 734, 735, 737, 738, 740,
+ 741, 742, 744, 745, 746, 748, 749, 750, 752, 753, 754,
+ 756, 757, 758, 760, 761, 763, 764, 765, 767, 768, 769,
+ 771, 772, 773, 775, 776, 778, 779, 780, 782, 783, 784,
+ 786, 787, 789, 790, 791, 793, 794, 795, 797, 798, 800,
+ 801, 802, 804, 805, 806, 808, 809, 811, 812, 813, 815,
+ 816, 818, 819, 820, 822, 823, 825, 826, 827, 829, 830,
+ 832, 833, 834, 836, 837, 839, 840, 841, 843, 844, 846,
+ 847, 848, 850, 851, 853, 854, 855, 857, 858, 860, 861,
+ 863, 864, 865, 867, 868, 870, 871, 872, 874, 875, 877,
+ 878, 880, 881, 882, 884, 885, 887, 888, 890, 891, 892,
+ 894, 895, 897, 898, 900, 901, 902, 904, 905, 907, 908,
+ 910, 911, 913, 914, 915, 917, 918, 920, 921, 923, 924,
+ 926, 927, 929, 930, 931, 933, 934, 936, 937, 939, 940,
+ 942, 943, 945, 946, 947, 949, 950, 952, 953, 955, 956,
+ 958, 959, 961, 962, 964, 965, 967, 968, 969, 971, 972,
+ 974, 975, 977, 978, 980, 981, 983, 984, 986, 987, 989,
+ 990, 992, 993, 995, 996, 998, 999, 1001, 1002, 1004, 1005,
+ 1007, 1008, 1010, 1011, 1013, 1014, 1016, 1017, 1019, 1020, 1022,
+ 1023,
+};
+
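+/* gamma = 1.6 */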
+static const u16 xgamma10_16[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16,
+ 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20,
+ 20, 21, 21, 21, 22, 22, 22, 23, 23, 24, 24,
+ 24, 25, 25, 26, 26, 26, 27, 27, 28, 28, 28,
+ 29, 29, 30, 30, 31, 31, 31, 32, 32, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 37, 37, 38, 38,
+ 39, 39, 40, 40, 41, 41, 41, 42, 42, 43, 43,
+ 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49,
+ 49, 50, 50, 51, 52, 52, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 58, 58, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 64, 65, 65, 66, 66, 67,
+ 67, 68, 69, 69, 70, 70, 71, 72, 72, 73, 73,
+ 74, 75, 75, 76, 76, 77, 78, 78, 79, 79, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 86, 86, 87,
+ 87, 88, 89, 89, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 111, 111, 112, 113, 114, 114, 115, 116, 116,
+ 117, 118, 119, 119, 120, 121, 121, 122, 123, 124, 124,
+ 125, 126, 126, 127, 128, 129, 129, 130, 131, 132, 132,
+ 133, 134, 135, 135, 136, 137, 138, 138, 139, 140, 141,
+ 141, 142, 143, 144, 144, 145, 146, 147, 148, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 159, 159, 160, 161, 162, 163, 163, 164, 165, 166, 167,
+ 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176,
+ 176, 177, 178, 179, 180, 181, 181, 182, 183, 184, 185,
+ 186, 186, 187, 188, 189, 190, 191, 192, 192, 193, 194,
+ 195, 196, 197, 198, 198, 199, 200, 201, 202, 203, 204,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 329, 330, 331,
+ 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 343, 344, 345, 346, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 362, 363, 364, 365, 366,
+ 367, 368, 369, 370, 371, 372, 374, 375, 376, 377, 378,
+ 379, 380, 381, 382, 383, 385, 386, 387, 388, 389, 390,
+ 391, 392, 393, 395, 396, 397, 398, 399, 400, 401, 402,
+ 404, 405, 406, 407, 408, 409, 410, 411, 413, 414, 415,
+ 416, 417, 418, 419, 421, 422, 423, 424, 425, 426, 428,
+ 429, 430, 431, 432, 433, 434, 436, 437, 438, 439, 440,
+ 441, 443, 444, 445, 446, 447, 448, 450, 451, 452, 453,
+ 454, 456, 457, 458, 459, 460, 461, 463, 464, 465, 466,
+ 467, 469, 470, 471, 472, 473, 475, 476, 477, 478, 479,
+ 481, 482, 483, 484, 485, 487, 488, 489, 490, 491, 493,
+ 494, 495, 496, 498, 499, 500, 501, 502, 504, 505, 506,
+ 507, 509, 510, 511, 512, 514, 515, 516, 517, 519, 520,
+ 521, 522, 523, 525, 526, 527, 528, 530, 531, 532, 533,
+ 535, 536, 537, 538, 540, 541, 542, 544, 545, 546, 547,
+ 549, 550, 551, 552, 554, 555, 556, 557, 559, 560, 561,
+ 563, 564, 565, 566, 568, 569, 570, 572, 573, 574, 575,
+ 577, 578, 579, 581, 582, 583, 584, 586, 587, 588, 590,
+ 591, 592, 594, 595, 596, 598, 599, 600, 601, 603, 604,
+ 605, 607, 608, 609, 611, 612, 613, 615, 616, 617, 619,
+ 620, 621, 623, 624, 625, 627, 628, 629, 631, 632, 633,
+ 635, 636, 637, 639, 640, 641, 643, 644, 645, 647, 648,
+ 649, 651, 652, 653, 655, 656, 657, 659, 660, 662, 663,
+ 664, 666, 667, 668, 670, 671, 672, 674, 675, 677, 678,
+ 679, 681, 682, 683, 685, 686, 688, 689, 690, 692, 693,
+ 694, 696, 697, 699, 700, 701, 703, 704, 706, 707, 708,
+ 710, 711, 712, 714, 715, 717, 718, 719, 721, 722, 724,
+ 725, 727, 728, 729, 731, 732, 734, 735, 736, 738, 739,
+ 741, 742, 743, 745, 746, 748, 749, 751, 752, 753, 755,
+ 756, 758, 759, 761, 762, 763, 765, 766, 768, 769, 771,
+ 772, 774, 775, 776, 778, 779, 781, 782, 784, 785, 787,
+ 788, 789, 791, 792, 794, 795, 797, 798, 800, 801, 803,
+ 804, 805, 807, 808, 810, 811, 813, 814, 816, 817, 819,
+ 820, 822, 823, 825, 826, 827, 829, 830, 832, 833, 835,
+ 836, 838, 839, 841, 842, 844, 845, 847, 848, 850, 851,
+ 853, 854, 856, 857, 859, 860, 862, 863, 865, 866, 868,
+ 869, 871, 872, 874, 875, 877, 878, 880, 881, 883, 884,
+ 886, 887, 889, 890, 892, 893, 895, 897, 898, 900, 901,
+ 903, 904, 906, 907, 909, 910, 912, 913, 915, 916, 918,
+ 919, 921, 923, 924, 926, 927, 929, 930, 932, 933, 935,
+ 936, 938, 940, 941, 943, 944, 946, 947, 949, 950, 952,
+ 954, 955, 957, 958, 960, 961, 963, 964, 966, 968, 969,
+ 971, 972, 974, 975, 977, 979, 980, 982, 983, 985, 986,
+ 988, 990, 991, 993, 994, 996, 998, 999, 1001, 1002, 1004,
+ 1005, 1007, 1009, 1010, 1012, 1013, 1015, 1017, 1018, 1020, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_17[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19,
+ 19, 20, 20, 20, 21, 21, 21, 22, 22, 22, 23,
+ 23, 23, 24, 24, 25, 25, 25, 26, 26, 26, 27,
+ 27, 28, 28, 28, 29, 29, 29, 30, 30, 31, 31,
+ 31, 32, 32, 33, 33, 34, 34, 34, 35, 35, 36,
+ 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 40,
+ 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46,
+ 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51,
+ 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56,
+ 57, 57, 58, 58, 59, 60, 60, 61, 61, 62, 62,
+ 63, 63, 64, 64, 65, 65, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 72, 72, 73, 73, 74, 74,
+ 75, 76, 76, 77, 77, 78, 79, 79, 80, 80, 81,
+ 82, 82, 83, 83, 84, 85, 85, 86, 86, 87, 88,
+ 88, 89, 89, 90, 91, 91, 92, 93, 93, 94, 95,
+ 95, 96, 96, 97, 98, 98, 99, 100, 100, 101, 102,
+ 102, 103, 104, 104, 105, 106, 106, 107, 108, 108, 109,
+ 110, 110, 111, 112, 112, 113, 114, 114, 115, 116, 116,
+ 117, 118, 119, 119, 120, 121, 121, 122, 123, 124, 124,
+ 125, 126, 126, 127, 128, 129, 129, 130, 131, 131, 132,
+ 133, 134, 134, 135, 136, 137, 137, 138, 139, 140, 140,
+ 141, 142, 143, 143, 144, 145, 146, 146, 147, 148, 149,
+ 149, 150, 151, 152, 153, 153, 154, 155, 156, 156, 157,
+ 158, 159, 160, 160, 161, 162, 163, 164, 164, 165, 166,
+ 167, 168, 168, 169, 170, 171, 172, 172, 173, 174, 175,
+ 176, 177, 177, 178, 179, 180, 181, 182, 182, 183, 184,
+ 185, 186, 187, 187, 188, 189, 190, 191, 192, 193, 193,
+ 194, 195, 196, 197, 198, 199, 199, 200, 201, 202, 203,
+ 204, 205, 206, 206, 207, 208, 209, 210, 211, 212, 213,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
+ 332, 333, 334, 336, 337, 338, 339, 340, 341, 342, 343,
+ 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355,
+ 356, 357, 358, 360, 361, 362, 363, 364, 365, 366, 367,
+ 368, 370, 371, 372, 373, 374, 375, 376, 377, 379, 380,
+ 381, 382, 383, 384, 385, 386, 388, 389, 390, 391, 392,
+ 393, 394, 396, 397, 398, 399, 400, 401, 403, 404, 405,
+ 406, 407, 408, 409, 411, 412, 413, 414, 415, 417, 418,
+ 419, 420, 421, 422, 424, 425, 426, 427, 428, 430, 431,
+ 432, 433, 434, 435, 437, 438, 439, 440, 441, 443, 444,
+ 445, 446, 448, 449, 450, 451, 452, 454, 455, 456, 457,
+ 458, 460, 461, 462, 463, 465, 466, 467, 468, 469, 471,
+ 472, 473, 474, 476, 477, 478, 479, 481, 482, 483, 484,
+ 486, 487, 488, 489, 491, 492, 493, 494, 496, 497, 498,
+ 499, 501, 502, 503, 505, 506, 507, 508, 510, 511, 512,
+ 513, 515, 516, 517, 519, 520, 521, 522, 524, 525, 526,
+ 528, 529, 530, 532, 533, 534, 535, 537, 538, 539, 541,
+ 542, 543, 545, 546, 547, 549, 550, 551, 552, 554, 555,
+ 556, 558, 559, 560, 562, 563, 564, 566, 567, 568, 570,
+ 571, 572, 574, 575, 576, 578, 579, 580, 582, 583, 584,
+ 586, 587, 589, 590, 591, 593, 594, 595, 597, 598, 599,
+ 601, 602, 604, 605, 606, 608, 609, 610, 612, 613, 615,
+ 616, 617, 619, 620, 621, 623, 624, 626, 627, 628, 630,
+ 631, 633, 634, 635, 637, 638, 640, 641, 642, 644, 645,
+ 647, 648, 649, 651, 652, 654, 655, 656, 658, 659, 661,
+ 662, 664, 665, 666, 668, 669, 671, 672, 674, 675, 676,
+ 678, 679, 681, 682, 684, 685, 686, 688, 689, 691, 692,
+ 694, 695, 697, 698, 699, 701, 702, 704, 705, 707, 708,
+ 710, 711, 713, 714, 716, 717, 718, 720, 721, 723, 724,
+ 726, 727, 729, 730, 732, 733, 735, 736, 738, 739, 741,
+ 742, 744, 745, 747, 748, 750, 751, 753, 754, 756, 757,
+ 759, 760, 762, 763, 765, 766, 768, 769, 771, 772, 774,
+ 775, 777, 778, 780, 781, 783, 784, 786, 787, 789, 790,
+ 792, 793, 795, 797, 798, 800, 801, 803, 804, 806, 807,
+ 809, 810, 812, 814, 815, 817, 818, 820, 821, 823, 824,
+ 826, 827, 829, 831, 832, 834, 835, 837, 838, 840, 842,
+ 843, 845, 846, 848, 849, 851, 853, 854, 856, 857, 859,
+ 860, 862, 864, 865, 867, 868, 870, 872, 873, 875, 876,
+ 878, 880, 881, 883, 884, 886, 888, 889, 891, 892, 894,
+ 896, 897, 899, 900, 902, 904, 905, 907, 908, 910, 912,
+ 913, 915, 917, 918, 920, 921, 923, 925, 926, 928, 930,
+ 931, 933, 935, 936, 938, 939, 941, 943, 944, 946, 948,
+ 949, 951, 953, 954, 956, 958, 959, 961, 963, 964, 966,
+ 968, 969, 971, 973, 974, 976, 978, 979, 981, 983, 984,
+ 986, 988, 989, 991, 993, 994, 996, 998, 999, 1001, 1003,
+ 1004, 1006, 1008, 1009, 1011, 1013, 1015, 1016, 1018, 1020, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_18[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+ 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18,
+ 18, 19, 19, 19, 20, 20, 20, 21, 21, 21, 22,
+ 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25,
+ 26, 26, 26, 27, 27, 27, 28, 28, 29, 29, 29,
+ 30, 30, 30, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 37, 37, 38, 38,
+ 38, 39, 39, 40, 40, 40, 41, 41, 42, 42, 43,
+ 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48,
+ 48, 48, 49, 49, 50, 50, 51, 51, 52, 52, 53,
+ 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 74, 74, 75, 75, 76,
+ 76, 77, 78, 78, 79, 79, 80, 80, 81, 82, 82,
+ 83, 83, 84, 85, 85, 86, 86, 87, 88, 88, 89,
+ 89, 90, 91, 91, 92, 92, 93, 94, 94, 95, 96,
+ 96, 97, 97, 98, 99, 99, 100, 101, 101, 102, 103,
+ 103, 104, 104, 105, 106, 106, 107, 108, 108, 109, 110,
+ 110, 111, 112, 112, 113, 114, 114, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 121, 122, 123, 123, 124, 125,
+ 126, 126, 127, 128, 128, 129, 130, 131, 131, 132, 133,
+ 133, 134, 135, 136, 136, 137, 138, 139, 139, 140, 141,
+ 142, 142, 143, 144, 145, 145, 146, 147, 148, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 158, 159, 160, 161, 162, 162, 163, 164, 165, 166, 166,
+ 167, 168, 169, 170, 170, 171, 172, 173, 174, 175, 175,
+ 176, 177, 178, 179, 179, 180, 181, 182, 183, 184, 184,
+ 185, 186, 187, 188, 189, 190, 190, 191, 192, 193, 194,
+ 195, 196, 196, 197, 198, 199, 200, 201, 202, 203, 203,
+ 204, 205, 206, 207, 208, 209, 210, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 344, 345, 346,
+ 347, 348, 349, 350, 351, 353, 354, 355, 356, 357, 358,
+ 359, 360, 362, 363, 364, 365, 366, 367, 368, 370, 371,
+ 372, 373, 374, 375, 376, 378, 379, 380, 381, 382, 383,
+ 385, 386, 387, 388, 389, 390, 392, 393, 394, 395, 396,
+ 397, 399, 400, 401, 402, 403, 405, 406, 407, 408, 409,
+ 411, 412, 413, 414, 415, 417, 418, 419, 420, 421, 423,
+ 424, 425, 426, 427, 429, 430, 431, 432, 434, 435, 436,
+ 437, 439, 440, 441, 442, 443, 445, 446, 447, 448, 450,
+ 451, 452, 453, 455, 456, 457, 458, 460, 461, 462, 464,
+ 465, 466, 467, 469, 470, 471, 472, 474, 475, 476, 478,
+ 479, 480, 481, 483, 484, 485, 487, 488, 489, 490, 492,
+ 493, 494, 496, 497, 498, 500, 501, 502, 504, 505, 506,
+ 507, 509, 510, 511, 513, 514, 515, 517, 518, 519, 521,
+ 522, 523, 525, 526, 527, 529, 530, 531, 533, 534, 535,
+ 537, 538, 540, 541, 542, 544, 545, 546, 548, 549, 550,
+ 552, 553, 555, 556, 557, 559, 560, 561, 563, 564, 566,
+ 567, 568, 570, 571, 572, 574, 575, 577, 578, 579, 581,
+ 582, 584, 585, 586, 588, 589, 591, 592, 594, 595, 596,
+ 598, 599, 601, 602, 603, 605, 606, 608, 609, 611, 612,
+ 613, 615, 616, 618, 619, 621, 622, 624, 625, 626, 628,
+ 629, 631, 632, 634, 635, 637, 638, 640, 641, 642, 644,
+ 645, 647, 648, 650, 651, 653, 654, 656, 657, 659, 660,
+ 662, 663, 665, 666, 668, 669, 671, 672, 673, 675, 676,
+ 678, 679, 681, 682, 684, 686, 687, 689, 690, 692, 693,
+ 695, 696, 698, 699, 701, 702, 704, 705, 707, 708, 710,
+ 711, 713, 714, 716, 717, 719, 721, 722, 724, 725, 727,
+ 728, 730, 731, 733, 734, 736, 738, 739, 741, 742, 744,
+ 745, 747, 749, 750, 752, 753, 755, 756, 758, 760, 761,
+ 763, 764, 766, 767, 769, 771, 772, 774, 775, 777, 779,
+ 780, 782, 783, 785, 787, 788, 790, 791, 793, 795, 796,
+ 798, 799, 801, 803, 804, 806, 807, 809, 811, 812, 814,
+ 816, 817, 819, 820, 822, 824, 825, 827, 829, 830, 832,
+ 834, 835, 837, 839, 840, 842, 843, 845, 847, 848, 850,
+ 852, 853, 855, 857, 858, 860, 862, 863, 865, 867, 868,
+ 870, 872, 873, 875, 877, 878, 880, 882, 884, 885, 887,
+ 889, 890, 892, 894, 895, 897, 899, 900, 902, 904, 906,
+ 907, 909, 911, 912, 914, 916, 918, 919, 921, 923, 924,
+ 926, 928, 930, 931, 933, 935, 936, 938, 940, 942, 943,
+ 945, 947, 949, 950, 952, 954, 956, 957, 959, 961, 963,
+ 964, 966, 968, 970, 971, 973, 975, 977, 978, 980, 982,
+ 984, 986, 987, 989, 991, 993, 994, 996, 998, 1000, 1002,
+ 1003, 1005, 1007, 1009, 1010, 1012, 1014, 1016, 1018, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_19[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15,
+ 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 21,
+ 21, 21, 22, 22, 22, 22, 23, 23, 23, 24, 24,
+ 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 32,
+ 32, 32, 33, 33, 33, 34, 34, 35, 35, 35, 36,
+ 36, 36, 37, 37, 38, 38, 38, 39, 39, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 48, 48, 49, 49, 50,
+ 50, 51, 51, 51, 52, 52, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
+ 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 70, 70, 71, 71,
+ 72, 72, 73, 74, 74, 75, 75, 76, 76, 77, 77,
+ 78, 79, 79, 80, 80, 81, 81, 82, 83, 83, 84,
+ 84, 85, 85, 86, 87, 87, 88, 88, 89, 90, 90,
+ 91, 91, 92, 93, 93, 94, 94, 95, 96, 96, 97,
+ 98, 98, 99, 99, 100, 101, 101, 102, 103, 103, 104,
+ 105, 105, 106, 107, 107, 108, 108, 109, 110, 110, 111,
+ 112, 112, 113, 114, 114, 115, 116, 116, 117, 118, 119,
+ 119, 120, 121, 121, 122, 123, 123, 124, 125, 125, 126,
+ 127, 128, 128, 129, 130, 130, 131, 132, 133, 133, 134,
+ 135, 135, 136, 137, 138, 138, 139, 140, 141, 141, 142,
+ 143, 144, 144, 145, 146, 147, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 157, 158, 159,
+ 160, 161, 161, 162, 163, 164, 165, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 173, 174, 175, 176, 177,
+ 178, 178, 179, 180, 181, 182, 183, 183, 184, 185, 186,
+ 187, 188, 188, 189, 190, 191, 192, 193, 194, 195, 195,
+ 196, 197, 198, 199, 200, 201, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 336, 337, 338,
+ 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350,
+ 351, 353, 354, 355, 356, 357, 358, 360, 361, 362, 363,
+ 364, 365, 367, 368, 369, 370, 371, 372, 374, 375, 376,
+ 377, 378, 379, 381, 382, 383, 384, 385, 387, 388, 389,
+ 390, 391, 393, 394, 395, 396, 397, 399, 400, 401, 402,
+ 404, 405, 406, 407, 408, 410, 411, 412, 413, 415, 416,
+ 417, 418, 420, 421, 422, 423, 425, 426, 427, 428, 430,
+ 431, 432, 433, 435, 436, 437, 439, 440, 441, 442, 444,
+ 445, 446, 447, 449, 450, 451, 453, 454, 455, 456, 458,
+ 459, 460, 462, 463, 464, 466, 467, 468, 470, 471, 472,
+ 473, 475, 476, 477, 479, 480, 481, 483, 484, 485, 487,
+ 488, 489, 491, 492, 493, 495, 496, 498, 499, 500, 502,
+ 503, 504, 506, 507, 508, 510, 511, 512, 514, 515, 517,
+ 518, 519, 521, 522, 523, 525, 526, 528, 529, 530, 532,
+ 533, 535, 536, 537, 539, 540, 542, 543, 544, 546, 547,
+ 549, 550, 551, 553, 554, 556, 557, 559, 560, 561, 563,
+ 564, 566, 567, 569, 570, 572, 573, 574, 576, 577, 579,
+ 580, 582, 583, 585, 586, 587, 589, 590, 592, 593, 595,
+ 596, 598, 599, 601, 602, 604, 605, 607, 608, 610, 611,
+ 613, 614, 616, 617, 619, 620, 622, 623, 625, 626, 628,
+ 629, 631, 632, 634, 635, 637, 638, 640, 641, 643, 644,
+ 646, 647, 649, 650, 652, 653, 655, 656, 658, 660, 661,
+ 663, 664, 666, 667, 669, 670, 672, 674, 675, 677, 678,
+ 680, 681, 683, 684, 686, 688, 689, 691, 692, 694, 696,
+ 697, 699, 700, 702, 703, 705, 707, 708, 710, 711, 713,
+ 715, 716, 718, 719, 721, 723, 724, 726, 728, 729, 731,
+ 732, 734, 736, 737, 739, 741, 742, 744, 745, 747, 749,
+ 750, 752, 754, 755, 757, 759, 760, 762, 764, 765, 767,
+ 768, 770, 772, 773, 775, 777, 778, 780, 782, 783, 785,
+ 787, 789, 790, 792, 794, 795, 797, 799, 800, 802, 804,
+ 805, 807, 809, 810, 812, 814, 816, 817, 819, 821, 822,
+ 824, 826, 828, 829, 831, 833, 834, 836, 838, 840, 841,
+ 843, 845, 847, 848, 850, 852, 854, 855, 857, 859, 861,
+ 862, 864, 866, 868, 869, 871, 873, 875, 876, 878, 880,
+ 882, 883, 885, 887, 889, 891, 892, 894, 896, 898, 899,
+ 901, 903, 905, 907, 908, 910, 912, 914, 916, 917, 919,
+ 921, 923, 925, 926, 928, 930, 932, 934, 936, 937, 939,
+ 941, 943, 945, 947, 948, 950, 952, 954, 956, 958, 959,
+ 961, 963, 965, 967, 969, 970, 972, 974, 976, 978, 980,
+ 982, 983, 985, 987, 989, 991, 993, 995, 997, 998, 1000,
+ 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_20[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+ 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17,
+ 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20,
+ 20, 20, 21, 21, 21, 21, 22, 22, 22, 23, 23,
+ 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 32, 32, 32, 33, 33, 33, 34,
+ 34, 35, 35, 35, 36, 36, 36, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 41, 41, 41, 42, 42,
+ 43, 43, 44, 44, 44, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 49, 49, 49, 50, 50, 51, 51, 52,
+ 52, 53, 53, 54, 54, 54, 55, 55, 56, 56, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 62, 62,
+ 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68,
+ 68, 69, 69, 70, 70, 71, 71, 72, 72, 73, 73,
+ 74, 74, 75, 76, 76, 77, 77, 78, 78, 79, 79,
+ 80, 81, 81, 82, 82, 83, 83, 84, 84, 85, 86,
+ 86, 87, 87, 88, 89, 89, 90, 90, 91, 92, 92,
+ 93, 93, 94, 95, 95, 96, 96, 97, 98, 98, 99,
+ 99, 100, 101, 101, 102, 103, 103, 104, 105, 105, 106,
+ 106, 107, 108, 108, 109, 110, 110, 111, 112, 112, 113,
+ 114, 114, 115, 116, 116, 117, 118, 118, 119, 120, 120,
+ 121, 122, 122, 123, 124, 125, 125, 126, 127, 127, 128,
+ 129, 130, 130, 131, 132, 132, 133, 134, 135, 135, 136,
+ 137, 137, 138, 139, 140, 140, 141, 142, 143, 143, 144,
+ 145, 146, 146, 147, 148, 149, 149, 150, 151, 152, 153,
+ 153, 154, 155, 156, 156, 157, 158, 159, 160, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168, 168, 169, 170,
+ 171, 172, 172, 173, 174, 175, 176, 177, 177, 178, 179,
+ 180, 181, 182, 182, 183, 184, 185, 186, 187, 188, 188,
+ 189, 190, 191, 192, 193, 194, 194, 195, 196, 197, 198,
+ 199, 200, 201, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 318, 319,
+ 320, 321, 322, 323, 324, 325, 327, 328, 329, 330, 331,
+ 332, 333, 335, 336, 337, 338, 339, 340, 341, 343, 344,
+ 345, 346, 347, 348, 350, 351, 352, 353, 354, 355, 357,
+ 358, 359, 360, 361, 363, 364, 365, 366, 367, 369, 370,
+ 371, 372, 373, 375, 376, 377, 378, 379, 381, 382, 383,
+ 384, 386, 387, 388, 389, 390, 392, 393, 394, 395, 397,
+ 398, 399, 400, 402, 403, 404, 405, 407, 408, 409, 410,
+ 412, 413, 414, 416, 417, 418, 419, 421, 422, 423, 425,
+ 426, 427, 428, 430, 431, 432, 434, 435, 436, 437, 439,
+ 440, 441, 443, 444, 445, 447, 448, 449, 451, 452, 453,
+ 455, 456, 457, 459, 460, 461, 463, 464, 465, 467, 468,
+ 469, 471, 472, 474, 475, 476, 478, 479, 480, 482, 483,
+ 484, 486, 487, 489, 490, 491, 493, 494, 496, 497, 498,
+ 500, 501, 503, 504, 505, 507, 508, 510, 511, 512, 514,
+ 515, 517, 518, 519, 521, 522, 524, 525, 527, 528, 530,
+ 531, 532, 534, 535, 537, 538, 540, 541, 543, 544, 545,
+ 547, 548, 550, 551, 553, 554, 556, 557, 559, 560, 562,
+ 563, 565, 566, 568, 569, 571, 572, 574, 575, 577, 578,
+ 580, 581, 583, 584, 586, 587, 589, 590, 592, 593, 595,
+ 596, 598, 599, 601, 602, 604, 605, 607, 609, 610, 612,
+ 613, 615, 616, 618, 619, 621, 622, 624, 626, 627, 629,
+ 630, 632, 633, 635, 637, 638, 640, 641, 643, 645, 646,
+ 648, 649, 651, 652, 654, 656, 657, 659, 660, 662, 664,
+ 665, 667, 669, 670, 672, 673, 675, 677, 678, 680, 682,
+ 683, 685, 686, 688, 690, 691, 693, 695, 696, 698, 700,
+ 701, 703, 705, 706, 708, 710, 711, 713, 715, 716, 718,
+ 720, 721, 723, 725, 726, 728, 730, 731, 733, 735, 736,
+ 738, 740, 742, 743, 745, 747, 748, 750, 752, 754, 755,
+ 757, 759, 760, 762, 764, 766, 767, 769, 771, 773, 774,
+ 776, 778, 780, 781, 783, 785, 787, 788, 790, 792, 794,
+ 795, 797, 799, 801, 802, 804, 806, 808, 809, 811, 813,
+ 815, 817, 818, 820, 822, 824, 826, 827, 829, 831, 833,
+ 835, 836, 838, 840, 842, 844, 845, 847, 849, 851, 853,
+ 855, 856, 858, 860, 862, 864, 866, 867, 869, 871, 873,
+ 875, 877, 878, 880, 882, 884, 886, 888, 890, 892, 893,
+ 895, 897, 899, 901, 903, 905, 907, 908, 910, 912, 914,
+ 916, 918, 920, 922, 924, 925, 927, 929, 931, 933, 935,
+ 937, 939, 941, 943, 945, 946, 948, 950, 952, 954, 956,
+ 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978,
+ 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999,
+ 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
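+/*
+ * For gamma = 2.0 the curve above reduces to integer arithmetic:
+ * xgamma10_20[i] appears to equal (i * i + 511) / 1023, i.e.
+ * round(i^2 / 1023). For example, i = 100 gives (10000 + 511) / 1023
+ * = 10, matching the tabulated value.
+ */
+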
+static const u16 xgamma10_21[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19,
+ 19, 19, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 24, 24, 24, 24, 25, 25,
+ 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36,
+ 36, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 45,
+ 45, 45, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 50, 50, 50, 51, 51, 52, 52, 53, 53, 54, 54,
+ 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 74, 74, 75, 75, 76,
+ 76, 77, 77, 78, 78, 79, 79, 80, 81, 81, 82,
+ 82, 83, 83, 84, 85, 85, 86, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 91, 92, 93, 93, 94, 94,
+ 95, 96, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 102, 103, 104, 104, 105, 106, 106, 107, 108, 108,
+ 109, 110, 110, 111, 111, 112, 113, 113, 114, 115, 115,
+ 116, 117, 117, 118, 119, 120, 120, 121, 122, 122, 123,
+ 124, 124, 125, 126, 126, 127, 128, 129, 129, 130, 131,
+ 131, 132, 133, 134, 134, 135, 136, 136, 137, 138, 139,
+ 139, 140, 141, 142, 142, 143, 144, 145, 145, 146, 147,
+ 148, 148, 149, 150, 151, 152, 152, 153, 154, 155, 155,
+ 156, 157, 158, 159, 159, 160, 161, 162, 163, 163, 164,
+ 165, 166, 167, 167, 168, 169, 170, 171, 171, 172, 173,
+ 174, 175, 176, 176, 177, 178, 179, 180, 181, 181, 182,
+ 183, 184, 185, 186, 187, 187, 188, 189, 190, 191, 192,
+ 193, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 310, 311, 312, 313,
+ 314, 315, 316, 317, 319, 320, 321, 322, 323, 324, 326,
+ 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338,
+ 339, 341, 342, 343, 344, 345, 347, 348, 349, 350, 351,
+ 353, 354, 355, 356, 357, 359, 360, 361, 362, 363, 365,
+ 366, 367, 368, 370, 371, 372, 373, 375, 376, 377, 378,
+ 380, 381, 382, 383, 385, 386, 387, 388, 390, 391, 392,
+ 393, 395, 396, 397, 399, 400, 401, 402, 404, 405, 406,
+ 408, 409, 410, 411, 413, 414, 415, 417, 418, 419, 421,
+ 422, 423, 425, 426, 427, 429, 430, 431, 433, 434, 435,
+ 437, 438, 439, 441, 442, 443, 445, 446, 447, 449, 450,
+ 452, 453, 454, 456, 457, 458, 460, 461, 463, 464, 465,
+ 467, 468, 469, 471, 472, 474, 475, 477, 478, 479, 481,
+ 482, 484, 485, 486, 488, 489, 491, 492, 494, 495, 496,
+ 498, 499, 501, 502, 504, 505, 507, 508, 509, 511, 512,
+ 514, 515, 517, 518, 520, 521, 523, 524, 526, 527, 529,
+ 530, 532, 533, 535, 536, 538, 539, 541, 542, 544, 545,
+ 547, 548, 550, 551, 553, 554, 556, 557, 559, 560, 562,
+ 563, 565, 566, 568, 569, 571, 573, 574, 576, 577, 579,
+ 580, 582, 583, 585, 587, 588, 590, 591, 593, 595, 596,
+ 598, 599, 601, 602, 604, 606, 607, 609, 610, 612, 614,
+ 615, 617, 618, 620, 622, 623, 625, 627, 628, 630, 631,
+ 633, 635, 636, 638, 640, 641, 643, 645, 646, 648, 650,
+ 651, 653, 654, 656, 658, 659, 661, 663, 664, 666, 668,
+ 670, 671, 673, 675, 676, 678, 680, 681, 683, 685, 686,
+ 688, 690, 692, 693, 695, 697, 698, 700, 702, 704, 705,
+ 707, 709, 711, 712, 714, 716, 717, 719, 721, 723, 724,
+ 726, 728, 730, 732, 733, 735, 737, 739, 740, 742, 744,
+ 746, 747, 749, 751, 753, 755, 756, 758, 760, 762, 764,
+ 765, 767, 769, 771, 773, 774, 776, 778, 780, 782, 784,
+ 785, 787, 789, 791, 793, 795, 796, 798, 800, 802, 804,
+ 806, 807, 809, 811, 813, 815, 817, 819, 821, 822, 824,
+ 826, 828, 830, 832, 834, 836, 837, 839, 841, 843, 845,
+ 847, 849, 851, 853, 855, 856, 858, 860, 862, 864, 866,
+ 868, 870, 872, 874, 876, 878, 880, 882, 883, 885, 887,
+ 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909,
+ 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931,
+ 933, 935, 937, 939, 941, 943, 945, 947, 949, 951, 953,
+ 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975,
+ 977, 979, 981, 984, 986, 988, 990, 992, 994, 996, 998,
+ 1000, 1002, 1004, 1006, 1008, 1010, 1013, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_22[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 19, 19, 19, 19, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 27, 27, 27,
+ 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38,
+ 39, 39, 39, 40, 40, 41, 41, 41, 42, 42, 43,
+ 43, 43, 44, 44, 44, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 49, 49, 49, 50, 50, 51, 51, 52,
+ 52, 52, 53, 53, 54, 54, 55, 55, 55, 56, 56,
+ 57, 57, 58, 58, 59, 59, 60, 60, 61, 61, 61,
+ 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67,
+ 67, 68, 68, 69, 69, 70, 70, 71, 71, 72, 72,
+ 73, 73, 74, 75, 75, 76, 76, 77, 77, 78, 78,
+ 79, 79, 80, 80, 81, 82, 82, 83, 83, 84, 84,
+ 85, 85, 86, 87, 87, 88, 88, 89, 89, 90, 91,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97,
+ 98, 98, 99, 100, 100, 101, 102, 102, 103, 103, 104,
+ 105, 105, 106, 107, 107, 108, 109, 109, 110, 110, 111,
+ 112, 112, 113, 114, 114, 115, 116, 116, 117, 118, 118,
+ 119, 120, 121, 121, 122, 123, 123, 124, 125, 125, 126,
+ 127, 127, 128, 129, 130, 130, 131, 132, 132, 133, 134,
+ 135, 135, 136, 137, 138, 138, 139, 140, 141, 141, 142,
+ 143, 144, 144, 145, 146, 147, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 157, 158, 159,
+ 160, 161, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 170, 171, 172, 173, 174, 175, 175, 176, 177,
+ 178, 179, 180, 181, 181, 182, 183, 184, 185, 186, 187,
+ 187, 188, 189, 190, 191, 192, 193, 194, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 294, 295, 296,
+ 297, 298, 299, 300, 301, 303, 304, 305, 306, 307, 308,
+ 309, 311, 312, 313, 314, 315, 316, 317, 319, 320, 321,
+ 322, 323, 324, 326, 327, 328, 329, 330, 332, 333, 334,
+ 335, 336, 338, 339, 340, 341, 342, 344, 345, 346, 347,
+ 348, 350, 351, 352, 353, 355, 356, 357, 358, 360, 361,
+ 362, 363, 365, 366, 367, 368, 370, 371, 372, 373, 375,
+ 376, 377, 378, 380, 381, 382, 384, 385, 386, 387, 389,
+ 390, 391, 393, 394, 395, 397, 398, 399, 401, 402, 403,
+ 405, 406, 407, 409, 410, 411, 413, 414, 415, 417, 418,
+ 419, 421, 422, 423, 425, 426, 427, 429, 430, 432, 433,
+ 434, 436, 437, 438, 440, 441, 443, 444, 445, 447, 448,
+ 450, 451, 452, 454, 455, 457, 458, 459, 461, 462, 464,
+ 465, 467, 468, 469, 471, 472, 474, 475, 477, 478, 480,
+ 481, 483, 484, 485, 487, 488, 490, 491, 493, 494, 496,
+ 497, 499, 500, 502, 503, 505, 506, 508, 509, 511, 512,
+ 514, 515, 517, 518, 520, 521, 523, 524, 526, 527, 529,
+ 530, 532, 534, 535, 537, 538, 540, 541, 543, 544, 546,
+ 548, 549, 551, 552, 554, 555, 557, 559, 560, 562, 563,
+ 565, 567, 568, 570, 571, 573, 575, 576, 578, 579, 581,
+ 583, 584, 586, 587, 589, 591, 592, 594, 596, 597, 599,
+ 601, 602, 604, 605, 607, 609, 610, 612, 614, 615, 617,
+ 619, 620, 622, 624, 625, 627, 629, 631, 632, 634, 636,
+ 637, 639, 641, 642, 644, 646, 648, 649, 651, 653, 654,
+ 656, 658, 660, 661, 663, 665, 667, 668, 670, 672, 674,
+ 675, 677, 679, 681, 682, 684, 686, 688, 689, 691, 693,
+ 695, 697, 698, 700, 702, 704, 705, 707, 709, 711, 713,
+ 714, 716, 718, 720, 722, 724, 725, 727, 729, 731, 733,
+ 735, 736, 738, 740, 742, 744, 746, 747, 749, 751, 753,
+ 755, 757, 759, 760, 762, 764, 766, 768, 770, 772, 774,
+ 776, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795,
+ 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816,
+ 818, 820, 822, 824, 826, 828, 829, 831, 833, 835, 837,
+ 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859,
+ 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881,
+ 883, 885, 887, 889, 892, 894, 896, 898, 900, 902, 904,
+ 906, 908, 910, 912, 914, 916, 918, 920, 922, 925, 927,
+ 929, 931, 933, 935, 937, 939, 941, 943, 945, 948, 950,
+ 952, 954, 956, 958, 960, 962, 965, 967, 969, 971, 973,
+ 975, 977, 980, 982, 984, 986, 988, 990, 992, 995, 997,
+ 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1019, 1021,
+ 1023,
+};
+
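+/*
+ * xgamma10_22 (gamma 2.2) corresponds to the conventional display
+ * gamma. Applying any of these curves is one lookup per 10-bit
+ * colour component, e.g. out = xgamma10_22[in] for in in 0..1023;
+ * in practice a driver would typically program the whole table into
+ * the hardware gamma LUT rather than converting pixels on the CPU.
+ */
+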
+static const u16 xgamma10_23[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11,
+ 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13,
+ 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20,
+ 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23,
+ 23, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30,
+ 30, 30, 30, 31, 31, 31, 32, 32, 32, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 42, 42, 42, 43, 43, 43, 44, 44, 45, 45,
+ 45, 46, 46, 47, 47, 47, 48, 48, 49, 49, 49,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75,
+ 76, 76, 77, 77, 78, 78, 79, 80, 80, 81, 81,
+ 82, 82, 83, 83, 84, 85, 85, 86, 86, 87, 87,
+ 88, 89, 89, 90, 90, 91, 91, 92, 93, 93, 94,
+ 94, 95, 96, 96, 97, 97, 98, 99, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 106, 107, 107,
+ 108, 109, 109, 110, 111, 111, 112, 113, 113, 114, 115,
+ 115, 116, 117, 117, 118, 119, 119, 120, 121, 121, 122,
+ 123, 124, 124, 125, 126, 126, 127, 128, 128, 129, 130,
+ 131, 131, 132, 133, 133, 134, 135, 136, 136, 137, 138,
+ 139, 139, 140, 141, 142, 142, 143, 144, 145, 145, 146,
+ 147, 148, 148, 149, 150, 151, 152, 152, 153, 154, 155,
+ 156, 156, 157, 158, 159, 160, 160, 161, 162, 163, 164,
+ 164, 165, 166, 167, 168, 168, 169, 170, 171, 172, 173,
+ 174, 174, 175, 176, 177, 178, 179, 179, 180, 181, 182,
+ 183, 184, 185, 186, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292,
+ 293, 294, 295, 296, 298, 299, 300, 301, 302, 303, 304,
+ 306, 307, 308, 309, 310, 311, 313, 314, 315, 316, 317,
+ 319, 320, 321, 322, 323, 325, 326, 327, 328, 329, 331,
+ 332, 333, 334, 335, 337, 338, 339, 340, 342, 343, 344,
+ 345, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358,
+ 359, 360, 362, 363, 364, 366, 367, 368, 369, 371, 372,
+ 373, 375, 376, 377, 379, 380, 381, 383, 384, 385, 386,
+ 388, 389, 390, 392, 393, 394, 396, 397, 399, 400, 401,
+ 403, 404, 405, 407, 408, 409, 411, 412, 414, 415, 416,
+ 418, 419, 420, 422, 423, 425, 426, 427, 429, 430, 432,
+ 433, 435, 436, 437, 439, 440, 442, 443, 444, 446, 447,
+ 449, 450, 452, 453, 455, 456, 458, 459, 460, 462, 463,
+ 465, 466, 468, 469, 471, 472, 474, 475, 477, 478, 480,
+ 481, 483, 484, 486, 487, 489, 490, 492, 493, 495, 496,
+ 498, 499, 501, 502, 504, 506, 507, 509, 510, 512, 513,
+ 515, 516, 518, 520, 521, 523, 524, 526, 527, 529, 531,
+ 532, 534, 535, 537, 539, 540, 542, 543, 545, 547, 548,
+ 550, 551, 553, 555, 556, 558, 560, 561, 563, 565, 566,
+ 568, 569, 571, 573, 574, 576, 578, 579, 581, 583, 584,
+ 586, 588, 590, 591, 593, 595, 596, 598, 600, 601, 603,
+ 605, 606, 608, 610, 612, 613, 615, 617, 619, 620, 622,
+ 624, 625, 627, 629, 631, 632, 634, 636, 638, 640, 641,
+ 643, 645, 647, 648, 650, 652, 654, 655, 657, 659, 661,
+ 663, 664, 666, 668, 670, 672, 674, 675, 677, 679, 681,
+ 683, 684, 686, 688, 690, 692, 694, 696, 697, 699, 701,
+ 703, 705, 707, 709, 710, 712, 714, 716, 718, 720, 722,
+ 724, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743,
+ 745, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764,
+ 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786,
+ 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807,
+ 809, 811, 814, 816, 818, 820, 822, 824, 826, 828, 830,
+ 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852,
+ 854, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875,
+ 878, 880, 882, 884, 886, 888, 890, 892, 894, 897, 899,
+ 901, 903, 905, 907, 909, 912, 914, 916, 918, 920, 922,
+ 925, 927, 929, 931, 933, 936, 938, 940, 942, 944, 946,
+ 949, 951, 953, 955, 958, 960, 962, 964, 966, 969, 971,
+ 973, 975, 978, 980, 982, 984, 987, 989, 991, 993, 996,
+ 998, 1000, 1002, 1005, 1007, 1009, 1012, 1014, 1016, 1018, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_24[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15,
+ 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 20,
+ 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 26, 26, 26, 26, 27, 27, 27, 28, 28, 28, 28,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35,
+ 36, 36, 36, 37, 37, 38, 38, 38, 39, 39, 39,
+ 40, 40, 40, 41, 41, 41, 42, 42, 43, 43, 43,
+ 44, 44, 44, 45, 45, 46, 46, 46, 47, 47, 48,
+ 48, 48, 49, 49, 50, 50, 50, 51, 51, 52, 52,
+ 53, 53, 53, 54, 54, 55, 55, 56, 56, 56, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 61, 62,
+ 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67,
+ 68, 68, 69, 69, 70, 70, 71, 71, 72, 72, 73,
+ 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 79,
+ 79, 80, 80, 81, 81, 82, 82, 83, 83, 84, 85,
+ 85, 86, 86, 87, 87, 88, 89, 89, 90, 90, 91,
+ 91, 92, 93, 93, 94, 94, 95, 96, 96, 97, 97,
+ 98, 99, 99, 100, 100, 101, 102, 102, 103, 104, 104,
+ 105, 106, 106, 107, 107, 108, 109, 109, 110, 111, 111,
+ 112, 113, 113, 114, 115, 115, 116, 117, 117, 118, 119,
+ 119, 120, 121, 121, 122, 123, 124, 124, 125, 126, 126,
+ 127, 128, 129, 129, 130, 131, 131, 132, 133, 134, 134,
+ 135, 136, 137, 137, 138, 139, 140, 140, 141, 142, 143,
+ 143, 144, 145, 146, 146, 147, 148, 149, 149, 150, 151,
+ 152, 153, 153, 154, 155, 156, 157, 157, 158, 159, 160,
+ 161, 161, 162, 163, 164, 165, 166, 166, 167, 168, 169,
+ 170, 171, 171, 172, 173, 174, 175, 176, 177, 177, 178,
+ 179, 180, 181, 182, 183, 184, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 279, 280, 281, 282, 283, 284, 285, 287, 288, 289,
+ 290, 291, 292, 293, 295, 296, 297, 298, 299, 300, 302,
+ 303, 304, 305, 306, 308, 309, 310, 311, 312, 314, 315,
+ 316, 317, 318, 320, 321, 322, 323, 324, 326, 327, 328,
+ 329, 331, 332, 333, 334, 336, 337, 338, 339, 341, 342,
+ 343, 344, 346, 347, 348, 350, 351, 352, 353, 355, 356,
+ 357, 359, 360, 361, 363, 364, 365, 367, 368, 369, 370,
+ 372, 373, 374, 376, 377, 378, 380, 381, 383, 384, 385,
+ 387, 388, 389, 391, 392, 393, 395, 396, 398, 399, 400,
+ 402, 403, 405, 406, 407, 409, 410, 412, 413, 414, 416,
+ 417, 419, 420, 421, 423, 424, 426, 427, 429, 430, 432,
+ 433, 434, 436, 437, 439, 440, 442, 443, 445, 446, 448,
+ 449, 451, 452, 454, 455, 457, 458, 460, 461, 463, 464,
+ 466, 467, 469, 470, 472, 473, 475, 476, 478, 479, 481,
+ 483, 484, 486, 487, 489, 490, 492, 493, 495, 497, 498,
+ 500, 501, 503, 505, 506, 508, 509, 511, 512, 514, 516,
+ 517, 519, 521, 522, 524, 525, 527, 529, 530, 532, 534,
+ 535, 537, 539, 540, 542, 543, 545, 547, 548, 550, 552,
+ 553, 555, 557, 559, 560, 562, 564, 565, 567, 569, 570,
+ 572, 574, 576, 577, 579, 581, 582, 584, 586, 588, 589,
+ 591, 593, 595, 596, 598, 600, 602, 603, 605, 607, 609,
+ 610, 612, 614, 616, 618, 619, 621, 623, 625, 627, 628,
+ 630, 632, 634, 636, 637, 639, 641, 643, 645, 647, 648,
+ 650, 652, 654, 656, 658, 660, 661, 663, 665, 667, 669,
+ 671, 673, 674, 676, 678, 680, 682, 684, 686, 688, 690,
+ 692, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711,
+ 713, 715, 717, 719, 721, 723, 724, 726, 728, 730, 732,
+ 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754,
+ 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 777,
+ 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799,
+ 801, 803, 805, 808, 810, 812, 814, 816, 818, 820, 822,
+ 824, 826, 829, 831, 833, 835, 837, 839, 841, 844, 846,
+ 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 870,
+ 872, 874, 876, 878, 880, 883, 885, 887, 889, 891, 894,
+ 896, 898, 900, 903, 905, 907, 909, 912, 914, 916, 918,
+ 921, 923, 925, 927, 930, 932, 934, 936, 939, 941, 943,
+ 946, 948, 950, 952, 955, 957, 959, 962, 964, 966, 969,
+ 971, 973, 976, 978, 980, 983, 985, 987, 990, 992, 994,
+ 997, 999, 1002, 1004, 1006, 1009, 1011, 1013, 1016, 1018, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_25[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11,
+ 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17,
+ 17, 17, 17, 18, 18, 18, 18, 18, 19, 19, 19,
+ 19, 20, 20, 20, 20, 20, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 40, 41, 41, 42, 42,
+ 42, 43, 43, 43, 44, 44, 45, 45, 45, 46, 46,
+ 46, 47, 47, 48, 48, 48, 49, 49, 50, 50, 50,
+ 51, 51, 52, 52, 53, 53, 53, 54, 54, 55, 55,
+ 56, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
+ 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65,
+ 66, 66, 67, 67, 68, 68, 69, 69, 70, 70, 71,
+ 71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76,
+ 77, 77, 78, 78, 79, 79, 80, 80, 81, 82, 82,
+ 83, 83, 84, 84, 85, 85, 86, 87, 87, 88, 88,
+ 89, 89, 90, 91, 91, 92, 92, 93, 94, 94, 95,
+ 95, 96, 97, 97, 98, 98, 99, 100, 100, 101, 102,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 110, 111, 112, 112, 113, 114, 114, 115, 116,
+ 117, 117, 118, 119, 119, 120, 121, 121, 122, 123, 123,
+ 124, 125, 126, 126, 127, 128, 128, 129, 130, 131, 131,
+ 132, 133, 133, 134, 135, 136, 136, 137, 138, 139, 139,
+ 140, 141, 142, 143, 143, 144, 145, 146, 146, 147, 148,
+ 149, 149, 150, 151, 152, 153, 153, 154, 155, 156, 157,
+ 158, 158, 159, 160, 161, 162, 162, 163, 164, 165, 166,
+ 167, 167, 168, 169, 170, 171, 172, 173, 173, 174, 175,
+ 176, 177, 178, 179, 180, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 270, 271, 272, 273, 274,
+ 275, 276, 277, 279, 280, 281, 282, 283, 284, 286, 287,
+ 288, 289, 290, 291, 293, 294, 295, 296, 297, 298, 300,
+ 301, 302, 303, 304, 306, 307, 308, 309, 311, 312, 313,
+ 314, 315, 317, 318, 319, 320, 322, 323, 324, 325, 327,
+ 328, 329, 330, 332, 333, 334, 336, 337, 338, 339, 341,
+ 342, 343, 345, 346, 347, 349, 350, 351, 352, 354, 355,
+ 356, 358, 359, 360, 362, 363, 364, 366, 367, 369, 370,
+ 371, 373, 374, 375, 377, 378, 379, 381, 382, 384, 385,
+ 386, 388, 389, 391, 392, 393, 395, 396, 398, 399, 400,
+ 402, 403, 405, 406, 408, 409, 411, 412, 413, 415, 416,
+ 418, 419, 421, 422, 424, 425, 427, 428, 430, 431, 433,
+ 434, 436, 437, 439, 440, 442, 443, 445, 446, 448, 449,
+ 451, 452, 454, 455, 457, 458, 460, 461, 463, 465, 466,
+ 468, 469, 471, 472, 474, 476, 477, 479, 480, 482, 483,
+ 485, 487, 488, 490, 491, 493, 495, 496, 498, 500, 501,
+ 503, 504, 506, 508, 509, 511, 513, 514, 516, 518, 519,
+ 521, 523, 524, 526, 528, 529, 531, 533, 534, 536, 538,
+ 540, 541, 543, 545, 546, 548, 550, 552, 553, 555, 557,
+ 558, 560, 562, 564, 565, 567, 569, 571, 572, 574, 576,
+ 578, 580, 581, 583, 585, 587, 588, 590, 592, 594, 596,
+ 597, 599, 601, 603, 605, 607, 608, 610, 612, 614, 616,
+ 618, 619, 621, 623, 625, 627, 629, 631, 632, 634, 636,
+ 638, 640, 642, 644, 646, 648, 649, 651, 653, 655, 657,
+ 659, 661, 663, 665, 667, 669, 671, 673, 674, 676, 678,
+ 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700,
+ 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722,
+ 724, 726, 728, 730, 732, 734, 736, 739, 741, 743, 745,
+ 747, 749, 751, 753, 755, 757, 759, 761, 763, 766, 768,
+ 770, 772, 774, 776, 778, 780, 782, 785, 787, 789, 791,
+ 793, 795, 797, 800, 802, 804, 806, 808, 810, 813, 815,
+ 817, 819, 821, 824, 826, 828, 830, 832, 835, 837, 839,
+ 841, 843, 846, 848, 850, 852, 855, 857, 859, 861, 864,
+ 866, 868, 870, 873, 875, 877, 880, 882, 884, 886, 889,
+ 891, 893, 896, 898, 900, 903, 905, 907, 910, 912, 914,
+ 917, 919, 921, 924, 926, 928, 931, 933, 935, 938, 940,
+ 942, 945, 947, 950, 952, 954, 957, 959, 962, 964, 966,
+ 969, 971, 974, 976, 979, 981, 983, 986, 988, 991, 993,
+ 996, 998, 1001, 1003, 1006, 1008, 1011, 1013, 1016, 1018, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_26[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24,
+ 24, 24, 25, 25, 25, 25, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 28, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 41, 42, 42, 43, 43, 43, 44, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 75, 76, 76, 77, 77, 78, 78, 79, 80, 80,
+ 81, 81, 82, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 94, 95, 96, 96, 97, 97, 98, 99, 99,
+ 100, 100, 101, 102, 102, 103, 104, 104, 105, 106, 106,
+ 107, 107, 108, 109, 109, 110, 111, 111, 112, 113, 113,
+ 114, 115, 115, 116, 117, 117, 118, 119, 120, 120, 121,
+ 122, 122, 123, 124, 124, 125, 126, 127, 127, 128, 129,
+ 129, 130, 131, 132, 132, 133, 134, 135, 135, 136, 137,
+ 138, 138, 139, 140, 141, 141, 142, 143, 144, 145, 145,
+ 146, 147, 148, 149, 149, 150, 151, 152, 153, 153, 154,
+ 155, 156, 157, 157, 158, 159, 160, 161, 162, 162, 163,
+ 164, 165, 166, 167, 167, 168, 169, 170, 171, 172, 173,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 266, 267, 268, 269, 270, 271, 272,
+ 274, 275, 276, 277, 278, 279, 281, 282, 283, 284, 285,
+ 286, 288, 289, 290, 291, 292, 294, 295, 296, 297, 299,
+ 300, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312,
+ 313, 315, 316, 317, 318, 320, 321, 322, 323, 325, 326,
+ 327, 329, 330, 331, 333, 334, 335, 336, 338, 339, 340,
+ 342, 343, 344, 346, 347, 348, 350, 351, 352, 354, 355,
+ 356, 358, 359, 361, 362, 363, 365, 366, 367, 369, 370,
+ 372, 373, 374, 376, 377, 379, 380, 381, 383, 384, 386,
+ 387, 389, 390, 391, 393, 394, 396, 397, 399, 400, 402,
+ 403, 405, 406, 407, 409, 410, 412, 413, 415, 416, 418,
+ 419, 421, 422, 424, 425, 427, 428, 430, 432, 433, 435,
+ 436, 438, 439, 441, 442, 444, 445, 447, 449, 450, 452,
+ 453, 455, 456, 458, 460, 461, 463, 464, 466, 468, 469,
+ 471, 472, 474, 476, 477, 479, 481, 482, 484, 485, 487,
+ 489, 490, 492, 494, 495, 497, 499, 500, 502, 504, 505,
+ 507, 509, 510, 512, 514, 516, 517, 519, 521, 522, 524,
+ 526, 528, 529, 531, 533, 535, 536, 538, 540, 542, 543,
+ 545, 547, 549, 550, 552, 554, 556, 558, 559, 561, 563,
+ 565, 567, 568, 570, 572, 574, 576, 577, 579, 581, 583,
+ 585, 587, 588, 590, 592, 594, 596, 598, 600, 601, 603,
+ 605, 607, 609, 611, 613, 615, 617, 619, 620, 622, 624,
+ 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646,
+ 648, 650, 651, 653, 655, 657, 659, 661, 663, 665, 667,
+ 669, 671, 673, 675, 677, 679, 681, 683, 685, 688, 690,
+ 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712,
+ 714, 716, 718, 721, 723, 725, 727, 729, 731, 733, 735,
+ 737, 740, 742, 744, 746, 748, 750, 752, 755, 757, 759,
+ 761, 763, 765, 768, 770, 772, 774, 776, 779, 781, 783,
+ 785, 787, 790, 792, 794, 796, 798, 801, 803, 805, 807,
+ 810, 812, 814, 816, 819, 821, 823, 826, 828, 830, 832,
+ 835, 837, 839, 842, 844, 846, 849, 851, 853, 855, 858,
+ 860, 862, 865, 867, 870, 872, 874, 877, 879, 881, 884,
+ 886, 888, 891, 893, 896, 898, 900, 903, 905, 908, 910,
+ 913, 915, 917, 920, 922, 925, 927, 930, 932, 934, 937,
+ 939, 942, 944, 947, 949, 952, 954, 957, 959, 962, 964,
+ 967, 969, 972, 974, 977, 979, 982, 984, 987, 990, 992,
+ 995, 997, 1000, 1002, 1005, 1007, 1010, 1013, 1015, 1018, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_27[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21,
+ 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
+ 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26,
+ 26, 27, 27, 27, 27, 28, 28, 28, 29, 29, 29,
+ 29, 30, 30, 30, 31, 31, 31, 32, 32, 32, 32,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36,
+ 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 40,
+ 40, 40, 41, 41, 41, 42, 42, 43, 43, 43, 44,
+ 44, 44, 45, 45, 46, 46, 46, 47, 47, 47, 48,
+ 48, 49, 49, 49, 50, 50, 51, 51, 51, 52, 52,
+ 53, 53, 54, 54, 54, 55, 55, 56, 56, 57, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 61, 62,
+ 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67,
+ 68, 68, 69, 69, 70, 70, 71, 71, 72, 72, 73,
+ 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78,
+ 79, 79, 80, 81, 81, 82, 82, 83, 83, 84, 84,
+ 85, 86, 86, 87, 87, 88, 88, 89, 90, 90, 91,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97,
+ 98, 99, 99, 100, 100, 101, 102, 102, 103, 104, 104,
+ 105, 105, 106, 107, 107, 108, 109, 109, 110, 111, 111,
+ 112, 113, 113, 114, 115, 115, 116, 117, 118, 118, 119,
+ 120, 120, 121, 122, 122, 123, 124, 125, 125, 126, 127,
+ 127, 128, 129, 130, 130, 131, 132, 133, 133, 134, 135,
+ 136, 136, 137, 138, 139, 139, 140, 141, 142, 143, 143,
+ 144, 145, 146, 146, 147, 148, 149, 150, 150, 151, 152,
+ 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161,
+ 162, 163, 164, 165, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 256, 257, 258, 259,
+ 260, 261, 262, 263, 265, 266, 267, 268, 269, 270, 272,
+ 273, 274, 275, 276, 278, 279, 280, 281, 282, 283, 285,
+ 286, 287, 288, 290, 291, 292, 293, 294, 296, 297, 298,
+ 299, 301, 302, 303, 304, 306, 307, 308, 309, 311, 312,
+ 313, 315, 316, 317, 318, 320, 321, 322, 324, 325, 326,
+ 328, 329, 330, 332, 333, 334, 336, 337, 338, 340, 341,
+ 342, 344, 345, 346, 348, 349, 351, 352, 353, 355, 356,
+ 357, 359, 360, 362, 363, 364, 366, 367, 369, 370, 372,
+ 373, 374, 376, 377, 379, 380, 382, 383, 385, 386, 387,
+ 389, 390, 392, 393, 395, 396, 398, 399, 401, 402, 404,
+ 405, 407, 408, 410, 411, 413, 414, 416, 417, 419, 421,
+ 422, 424, 425, 427, 428, 430, 431, 433, 435, 436, 438,
+ 439, 441, 442, 444, 446, 447, 449, 450, 452, 454, 455,
+ 457, 459, 460, 462, 463, 465, 467, 468, 470, 472, 473,
+ 475, 477, 478, 480, 482, 483, 485, 487, 488, 490, 492,
+ 494, 495, 497, 499, 500, 502, 504, 506, 507, 509, 511,
+ 513, 514, 516, 518, 520, 521, 523, 525, 527, 528, 530,
+ 532, 534, 536, 537, 539, 541, 543, 545, 546, 548, 550,
+ 552, 554, 556, 557, 559, 561, 563, 565, 567, 569, 570,
+ 572, 574, 576, 578, 580, 582, 584, 586, 587, 589, 591,
+ 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613,
+ 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634,
+ 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656,
+ 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679,
+ 681, 683, 685, 688, 690, 692, 694, 696, 698, 700, 702,
+ 705, 707, 709, 711, 713, 715, 717, 720, 722, 724, 726,
+ 728, 730, 733, 735, 737, 739, 741, 744, 746, 748, 750,
+ 752, 755, 757, 759, 761, 764, 766, 768, 770, 773, 775,
+ 777, 779, 782, 784, 786, 789, 791, 793, 795, 798, 800,
+ 802, 805, 807, 809, 812, 814, 816, 819, 821, 823, 826,
+ 828, 831, 833, 835, 838, 840, 842, 845, 847, 850, 852,
+ 854, 857, 859, 862, 864, 867, 869, 871, 874, 876, 879,
+ 881, 884, 886, 889, 891, 894, 896, 899, 901, 903, 906,
+ 908, 911, 914, 916, 919, 921, 924, 926, 929, 931, 934,
+ 936, 939, 941, 944, 947, 949, 952, 954, 957, 959, 962,
+ 965, 967, 970, 973, 975, 978, 980, 983, 986, 988, 991,
+ 994, 996, 999, 1002, 1004, 1007, 1010, 1012, 1015, 1018, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_28[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20,
+ 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23,
+ 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26,
+ 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 29,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 31, 32,
+ 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35,
+ 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39,
+ 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43,
+ 43, 43, 44, 44, 45, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 48, 49, 49, 50, 50, 50, 51, 51,
+ 52, 52, 52, 53, 53, 54, 54, 55, 55, 55, 56,
+ 56, 57, 57, 58, 58, 58, 59, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 70, 70, 71, 71,
+ 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
+ 77, 78, 79, 79, 80, 80, 81, 81, 82, 82, 83,
+ 83, 84, 85, 85, 86, 86, 87, 87, 88, 89, 89,
+ 90, 90, 91, 92, 92, 93, 93, 94, 95, 95, 96,
+ 96, 97, 98, 98, 99, 99, 100, 101, 101, 102, 103,
+ 103, 104, 105, 105, 106, 106, 107, 108, 108, 109, 110,
+ 110, 111, 112, 112, 113, 114, 115, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 122, 122, 123, 124, 124, 125,
+ 126, 127, 127, 128, 129, 130, 130, 131, 132, 132, 133,
+ 134, 135, 136, 136, 137, 138, 139, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 146, 147, 148, 149, 150, 151,
+ 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160,
+ 161, 161, 162, 163, 164, 165, 166, 167, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 251, 252, 253, 254, 255, 256, 257, 259,
+ 260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 272,
+ 273, 274, 275, 276, 278, 279, 280, 281, 282, 284, 285,
+ 286, 287, 289, 290, 291, 292, 294, 295, 296, 297, 299,
+ 300, 301, 302, 304, 305, 306, 308, 309, 310, 311, 313,
+ 314, 315, 317, 318, 319, 321, 322, 323, 325, 326, 327,
+ 329, 330, 331, 333, 334, 336, 337, 338, 340, 341, 342,
+ 344, 345, 347, 348, 349, 351, 352, 354, 355, 356, 358,
+ 359, 361, 362, 364, 365, 366, 368, 369, 371, 372, 374,
+ 375, 377, 378, 380, 381, 383, 384, 386, 387, 389, 390,
+ 392, 393, 395, 396, 398, 399, 401, 402, 404, 405, 407,
+ 408, 410, 412, 413, 415, 416, 418, 419, 421, 423, 424,
+ 426, 427, 429, 431, 432, 434, 435, 437, 439, 440, 442,
+ 444, 445, 447, 448, 450, 452, 453, 455, 457, 458, 460,
+ 462, 463, 465, 467, 468, 470, 472, 474, 475, 477, 479,
+ 480, 482, 484, 486, 487, 489, 491, 493, 494, 496, 498,
+ 500, 501, 503, 505, 507, 509, 510, 512, 514, 516, 518,
+ 519, 521, 523, 525, 527, 528, 530, 532, 534, 536, 538,
+ 539, 541, 543, 545, 547, 549, 551, 553, 554, 556, 558,
+ 560, 562, 564, 566, 568, 570, 572, 574, 575, 577, 579,
+ 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601,
+ 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623,
+ 625, 627, 629, 631, 633, 635, 637, 640, 642, 644, 646,
+ 648, 650, 652, 654, 656, 658, 660, 663, 665, 667, 669,
+ 671, 673, 675, 678, 680, 682, 684, 686, 688, 690, 693,
+ 695, 697, 699, 701, 704, 706, 708, 710, 712, 715, 717,
+ 719, 721, 724, 726, 728, 730, 733, 735, 737, 739, 742,
+ 744, 746, 749, 751, 753, 755, 758, 760, 762, 765, 767,
+ 769, 772, 774, 776, 779, 781, 783, 786, 788, 790, 793,
+ 795, 798, 800, 802, 805, 807, 810, 812, 814, 817, 819,
+ 822, 824, 827, 829, 831, 834, 836, 839, 841, 844, 846,
+ 849, 851, 854, 856, 859, 861, 864, 866, 869, 871, 874,
+ 876, 879, 881, 884, 887, 889, 892, 894, 897, 899, 902,
+ 905, 907, 910, 912, 915, 918, 920, 923, 925, 928, 931,
+ 933, 936, 939, 941, 944, 947, 949, 952, 955, 957, 960,
+ 963, 965, 968, 971, 973, 976, 979, 982, 984, 987, 990,
+ 992, 995, 998, 1001, 1004, 1006, 1009, 1012, 1015, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_29[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20,
+ 20, 20, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 25, 26, 26, 26, 26, 27, 27, 27, 28, 28, 28,
+ 28, 29, 29, 29, 29, 30, 30, 30, 31, 31, 31,
+ 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35,
+ 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 41, 41, 41, 42, 42,
+ 42, 43, 43, 43, 44, 44, 44, 45, 45, 46, 46,
+ 46, 47, 47, 48, 48, 48, 49, 49, 49, 50, 50,
+ 51, 51, 52, 52, 52, 53, 53, 54, 54, 54, 55,
+ 55, 56, 56, 57, 57, 57, 58, 58, 59, 59, 60,
+ 60, 61, 61, 61, 62, 62, 63, 63, 64, 64, 65,
+ 65, 66, 66, 67, 67, 68, 68, 69, 69, 70, 70,
+ 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
+ 76, 77, 77, 78, 78, 79, 80, 80, 81, 81, 82,
+ 82, 83, 83, 84, 85, 85, 86, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 92, 92, 93, 93, 94, 95,
+ 95, 96, 96, 97, 98, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 111, 111, 112, 113, 113, 114, 115, 115, 116,
+ 117, 117, 118, 119, 120, 120, 121, 122, 122, 123, 124,
+ 125, 125, 126, 127, 128, 128, 129, 130, 131, 131, 132,
+ 133, 134, 134, 135, 136, 137, 137, 138, 139, 140, 141,
+ 141, 142, 143, 144, 145, 145, 146, 147, 148, 149, 149,
+ 150, 151, 152, 153, 154, 154, 155, 156, 157, 158, 159,
+ 160, 160, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 241, 242, 243, 244, 245, 246,
+ 247, 248, 250, 251, 252, 253, 254, 255, 257, 258, 259,
+ 260, 261, 263, 264, 265, 266, 267, 269, 270, 271, 272,
+ 273, 275, 276, 277, 278, 280, 281, 282, 283, 285, 286,
+ 287, 288, 290, 291, 292, 293, 295, 296, 297, 299, 300,
+ 301, 302, 304, 305, 306, 308, 309, 310, 312, 313, 314,
+ 316, 317, 318, 320, 321, 322, 324, 325, 327, 328, 329,
+ 331, 332, 333, 335, 336, 338, 339, 340, 342, 343, 345,
+ 346, 348, 349, 350, 352, 353, 355, 356, 358, 359, 361,
+ 362, 363, 365, 366, 368, 369, 371, 372, 374, 375, 377,
+ 378, 380, 381, 383, 384, 386, 388, 389, 391, 392, 394,
+ 395, 397, 398, 400, 402, 403, 405, 406, 408, 409, 411,
+ 413, 414, 416, 417, 419, 421, 422, 424, 426, 427, 429,
+ 430, 432, 434, 435, 437, 439, 440, 442, 444, 445, 447,
+ 449, 450, 452, 454, 456, 457, 459, 461, 462, 464, 466,
+ 468, 469, 471, 473, 475, 476, 478, 480, 482, 483, 485,
+ 487, 489, 491, 492, 494, 496, 498, 500, 501, 503, 505,
+ 507, 509, 511, 512, 514, 516, 518, 520, 522, 524, 525,
+ 527, 529, 531, 533, 535, 537, 539, 541, 542, 544, 546,
+ 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590,
+ 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612,
+ 614, 616, 618, 621, 623, 625, 627, 629, 631, 633, 635,
+ 637, 640, 642, 644, 646, 648, 650, 652, 655, 657, 659,
+ 661, 663, 665, 668, 670, 672, 674, 676, 679, 681, 683,
+ 685, 688, 690, 692, 694, 697, 699, 701, 703, 706, 708,
+ 710, 712, 715, 717, 719, 722, 724, 726, 729, 731, 733,
+ 736, 738, 740, 743, 745, 747, 750, 752, 754, 757, 759,
+ 762, 764, 766, 769, 771, 774, 776, 778, 781, 783, 786,
+ 788, 791, 793, 795, 798, 800, 803, 805, 808, 810, 813,
+ 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 841,
+ 843, 846, 848, 851, 853, 856, 859, 861, 864, 866, 869,
+ 872, 874, 877, 879, 882, 885, 887, 890, 893, 895, 898,
+ 901, 903, 906, 909, 911, 914, 917, 919, 922, 925, 927,
+ 930, 933, 936, 938, 941, 944, 947, 949, 952, 955, 958,
+ 960, 963, 966, 969, 972, 974, 977, 980, 983, 986, 989,
+ 991, 994, 997, 1000, 1003, 1006, 1009, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_30[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20,
+ 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25,
+ 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 30, 30, 30, 30, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 33, 34, 34,
+ 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+ 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41,
+ 42, 42, 42, 43, 43, 43, 44, 44, 45, 45, 45,
+ 46, 46, 46, 47, 47, 48, 48, 48, 49, 49, 50,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 58, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
+ 70, 70, 71, 71, 72, 72, 73, 73, 74, 74, 75,
+ 75, 76, 77, 77, 78, 78, 79, 79, 80, 80, 81,
+ 81, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87,
+ 88, 88, 89, 89, 90, 91, 91, 92, 92, 93, 94,
+ 94, 95, 95, 96, 97, 97, 98, 99, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 106, 107, 108,
+ 108, 109, 110, 110, 111, 112, 112, 113, 114, 114, 115,
+ 116, 117, 117, 118, 119, 119, 120, 121, 122, 122, 123,
+ 124, 125, 125, 126, 127, 128, 128, 129, 130, 131, 131,
+ 132, 133, 134, 134, 135, 136, 137, 137, 138, 139, 140,
+ 141, 141, 142, 143, 144, 145, 146, 146, 147, 148, 149,
+ 150, 150, 151, 152, 153, 154, 155, 156, 156, 157, 158,
+ 159, 160, 161, 162, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 236, 237, 238, 239, 240, 241, 242, 244, 245, 246, 247,
+ 248, 249, 250, 252, 253, 254, 255, 256, 258, 259, 260,
+ 261, 262, 264, 265, 266, 267, 269, 270, 271, 272, 273,
+ 275, 276, 277, 278, 280, 281, 282, 284, 285, 286, 287,
+ 289, 290, 291, 293, 294, 295, 296, 298, 299, 300, 302,
+ 303, 304, 306, 307, 308, 310, 311, 313, 314, 315, 317,
+ 318, 319, 321, 322, 324, 325, 326, 328, 329, 331, 332,
+ 333, 335, 336, 338, 339, 341, 342, 343, 345, 346, 348,
+ 349, 351, 352, 354, 355, 357, 358, 360, 361, 363, 364,
+ 366, 367, 369, 370, 372, 373, 375, 376, 378, 379, 381,
+ 383, 384, 386, 387, 389, 390, 392, 394, 395, 397, 398,
+ 400, 402, 403, 405, 406, 408, 410, 411, 413, 415, 416,
+ 418, 419, 421, 423, 424, 426, 428, 429, 431, 433, 435,
+ 436, 438, 440, 441, 443, 445, 447, 448, 450, 452, 453,
+ 455, 457, 459, 460, 462, 464, 466, 468, 469, 471, 473,
+ 475, 477, 478, 480, 482, 484, 486, 487, 489, 491, 493,
+ 495, 497, 498, 500, 502, 504, 506, 508, 510, 512, 513,
+ 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535,
+ 537, 539, 540, 542, 544, 546, 548, 550, 552, 554, 556,
+ 558, 560, 562, 564, 566, 568, 570, 572, 574, 577, 579,
+ 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601,
+ 604, 606, 608, 610, 612, 614, 616, 618, 621, 623, 625,
+ 627, 629, 631, 634, 636, 638, 640, 642, 645, 647, 649,
+ 651, 653, 656, 658, 660, 662, 665, 667, 669, 671, 674,
+ 676, 678, 680, 683, 685, 687, 690, 692, 694, 697, 699,
+ 701, 704, 706, 708, 711, 713, 715, 718, 720, 722, 725,
+ 727, 730, 732, 734, 737, 739, 742, 744, 746, 749, 751,
+ 754, 756, 759, 761, 764, 766, 769, 771, 774, 776, 779,
+ 781, 784, 786, 789, 791, 794, 796, 799, 801, 804, 806,
+ 809, 812, 814, 817, 819, 822, 824, 827, 830, 832, 835,
+ 837, 840, 843, 845, 848, 851, 853, 856, 859, 861, 864,
+ 867, 869, 872, 875, 878, 880, 883, 886, 888, 891, 894,
+ 897, 899, 902, 905, 908, 910, 913, 916, 919, 922, 924,
+ 927, 930, 933, 936, 938, 941, 944, 947, 950, 953, 956,
+ 958, 961, 964, 967, 970, 973, 976, 979, 982, 984, 987,
+ 990, 993, 996, 999, 1002, 1005, 1008, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_31[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 30, 30, 30, 30,
+ 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 34,
+ 34, 34, 35, 35, 35, 36, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 41, 41,
+ 41, 42, 42, 42, 43, 43, 43, 44, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 61, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 75, 76, 76, 77, 77, 78, 79, 79, 80, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 85, 86, 86,
+ 87, 88, 88, 89, 89, 90, 91, 91, 92, 92, 93,
+ 94, 94, 95, 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 111, 111, 112, 113, 113, 114, 115,
+ 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 123,
+ 123, 124, 125, 126, 126, 127, 128, 129, 129, 130, 131,
+ 132, 132, 133, 134, 135, 136, 136, 137, 138, 139, 140,
+ 140, 141, 142, 143, 144, 144, 145, 146, 147, 148, 149,
+ 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 159, 160, 161, 162, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 227, 228, 229, 230, 231, 232, 233, 234, 236,
+ 237, 238, 239, 240, 241, 243, 244, 245, 246, 247, 248,
+ 250, 251, 252, 253, 254, 256, 257, 258, 259, 260, 262,
+ 263, 264, 265, 267, 268, 269, 270, 272, 273, 274, 275,
+ 277, 278, 279, 281, 282, 283, 285, 286, 287, 288, 290,
+ 291, 292, 294, 295, 296, 298, 299, 300, 302, 303, 305,
+ 306, 307, 309, 310, 311, 313, 314, 316, 317, 318, 320,
+ 321, 323, 324, 325, 327, 328, 330, 331, 333, 334, 336,
+ 337, 338, 340, 341, 343, 344, 346, 347, 349, 350, 352,
+ 353, 355, 356, 358, 359, 361, 362, 364, 366, 367, 369,
+ 370, 372, 373, 375, 376, 378, 380, 381, 383, 384, 386,
+ 388, 389, 391, 392, 394, 396, 397, 399, 401, 402, 404,
+ 406, 407, 409, 411, 412, 414, 416, 417, 419, 421, 422,
+ 424, 426, 427, 429, 431, 433, 434, 436, 438, 440, 441,
+ 443, 445, 447, 448, 450, 452, 454, 456, 457, 459, 461,
+ 463, 465, 466, 468, 470, 472, 474, 476, 477, 479, 481,
+ 483, 485, 487, 489, 490, 492, 494, 496, 498, 500, 502,
+ 504, 506, 508, 510, 511, 513, 515, 517, 519, 521, 523,
+ 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545,
+ 547, 549, 551, 553, 555, 557, 559, 561, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 587, 589, 591,
+ 593, 595, 597, 599, 602, 604, 606, 608, 610, 613, 615,
+ 617, 619, 621, 624, 626, 628, 630, 632, 635, 637, 639,
+ 641, 644, 646, 648, 651, 653, 655, 657, 660, 662, 664,
+ 667, 669, 671, 674, 676, 678, 681, 683, 685, 688, 690,
+ 692, 695, 697, 700, 702, 704, 707, 709, 712, 714, 717,
+ 719, 721, 724, 726, 729, 731, 734, 736, 739, 741, 744,
+ 746, 749, 751, 754, 756, 759, 761, 764, 766, 769, 772,
+ 774, 777, 779, 782, 784, 787, 790, 792, 795, 797, 800,
+ 803, 805, 808, 811, 813, 816, 819, 821, 824, 827, 829,
+ 832, 835, 837, 840, 843, 845, 848, 851, 854, 856, 859,
+ 862, 865, 867, 870, 873, 876, 879, 881, 884, 887, 890,
+ 893, 895, 898, 901, 904, 907, 910, 913, 915, 918, 921,
+ 924, 927, 930, 933, 936, 939, 942, 945, 947, 950, 953,
+ 956, 959, 962, 965, 968, 971, 974, 977, 980, 983, 986,
+ 989, 992, 995, 998, 1001, 1005, 1008, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_32[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 30, 30, 30,
+ 30, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 41,
+ 41, 41, 42, 42, 42, 43, 43, 43, 44, 44, 44,
+ 45, 45, 46, 46, 46, 47, 47, 48, 48, 48, 49,
+ 49, 49, 50, 50, 51, 51, 51, 52, 52, 53, 53,
+ 54, 54, 54, 55, 55, 56, 56, 57, 57, 57, 58,
+ 58, 59, 59, 60, 60, 61, 61, 62, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 77, 77, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 85, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 95, 95, 96, 96, 97, 98, 98, 99, 100,
+ 100, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112, 113, 114, 114,
+ 115, 116, 117, 117, 118, 119, 120, 120, 121, 122, 122,
+ 123, 124, 125, 125, 126, 127, 128, 129, 129, 130, 131,
+ 132, 132, 133, 134, 135, 136, 136, 137, 138, 139, 140,
+ 140, 141, 142, 143, 144, 145, 145, 146, 147, 148, 149,
+ 150, 150, 151, 152, 153, 154, 155, 156, 157, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 186, 187, 188, 189,
+ 190, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 224, 225,
+ 226, 227, 228, 229, 230, 231, 233, 234, 235, 236, 237,
+ 238, 240, 241, 242, 243, 244, 246, 247, 248, 249, 250,
+ 252, 253, 254, 255, 257, 258, 259, 260, 262, 263, 264,
+ 265, 267, 268, 269, 270, 272, 273, 274, 276, 277, 278,
+ 280, 281, 282, 283, 285, 286, 287, 289, 290, 291, 293,
+ 294, 296, 297, 298, 300, 301, 302, 304, 305, 307, 308,
+ 309, 311, 312, 314, 315, 316, 318, 319, 321, 322, 324,
+ 325, 327, 328, 330, 331, 332, 334, 335, 337, 338, 340,
+ 341, 343, 344, 346, 347, 349, 351, 352, 354, 355, 357,
+ 358, 360, 361, 363, 364, 366, 368, 369, 371, 372, 374,
+ 376, 377, 379, 380, 382, 384, 385, 387, 389, 390, 392,
+ 394, 395, 397, 399, 400, 402, 404, 405, 407, 409, 410,
+ 412, 414, 416, 417, 419, 421, 423, 424, 426, 428, 430,
+ 431, 433, 435, 437, 438, 440, 442, 444, 446, 447, 449,
+ 451, 453, 455, 457, 458, 460, 462, 464, 466, 468, 469,
+ 471, 473, 475, 477, 479, 481, 483, 485, 487, 488, 490,
+ 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512,
+ 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534,
+ 536, 538, 540, 542, 544, 547, 549, 551, 553, 555, 557,
+ 559, 561, 563, 565, 568, 570, 572, 574, 576, 578, 581,
+ 583, 585, 587, 589, 591, 594, 596, 598, 600, 602, 605,
+ 607, 609, 611, 614, 616, 618, 620, 623, 625, 627, 630,
+ 632, 634, 636, 639, 641, 643, 646, 648, 650, 653, 655,
+ 657, 660, 662, 665, 667, 669, 672, 674, 677, 679, 681,
+ 684, 686, 689, 691, 694, 696, 698, 701, 703, 706, 708,
+ 711, 713, 716, 718, 721, 723, 726, 728, 731, 734, 736,
+ 739, 741, 744, 746, 749, 751, 754, 757, 759, 762, 765,
+ 767, 770, 772, 775, 778, 780, 783, 786, 788, 791, 794,
+ 796, 799, 802, 804, 807, 810, 813, 815, 818, 821, 824,
+ 826, 829, 832, 835, 838, 840, 843, 846, 849, 852, 854,
+ 857, 860, 863, 866, 869, 871, 874, 877, 880, 883, 886,
+ 889, 892, 895, 897, 900, 903, 906, 909, 912, 915, 918,
+ 921, 924, 927, 930, 933, 936, 939, 942, 945, 948, 951,
+ 954, 957, 960, 963, 967, 970, 973, 976, 979, 982, 985,
+ 988, 991, 994, 998, 1001, 1004, 1007, 1010, 1013, 1017, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_33[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 44, 44, 44,
+ 45, 45, 45, 46, 46, 47, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 53, 53,
+ 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 58,
+ 58, 59, 59, 59, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 76, 77, 77, 78, 79, 79, 80,
+ 80, 81, 81, 82, 82, 83, 84, 84, 85, 85, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 94, 95, 96, 96, 97, 98, 98, 99, 100,
+ 100, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112, 113, 114, 115,
+ 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 123,
+ 123, 124, 125, 126, 127, 127, 128, 129, 130, 130, 131,
+ 132, 133, 134, 134, 135, 136, 137, 138, 138, 139, 140,
+ 141, 142, 143, 143, 144, 145, 146, 147, 148, 148, 149,
+ 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 218, 219, 220, 221, 222, 223, 224, 226, 227,
+ 228, 229, 230, 231, 233, 234, 235, 236, 237, 238, 240,
+ 241, 242, 243, 245, 246, 247, 248, 249, 251, 252, 253,
+ 254, 256, 257, 258, 259, 261, 262, 263, 265, 266, 267,
+ 268, 270, 271, 272, 274, 275, 276, 278, 279, 280, 282,
+ 283, 284, 286, 287, 288, 290, 291, 292, 294, 295, 297,
+ 298, 299, 301, 302, 304, 305, 307, 308, 309, 311, 312,
+ 314, 315, 317, 318, 320, 321, 322, 324, 325, 327, 328,
+ 330, 331, 333, 334, 336, 337, 339, 341, 342, 344, 345,
+ 347, 348, 350, 351, 353, 355, 356, 358, 359, 361, 362,
+ 364, 366, 367, 369, 371, 372, 374, 375, 377, 379, 380,
+ 382, 384, 385, 387, 389, 390, 392, 394, 395, 397, 399,
+ 401, 402, 404, 406, 408, 409, 411, 413, 414, 416, 418,
+ 420, 422, 423, 425, 427, 429, 431, 432, 434, 436, 438,
+ 440, 441, 443, 445, 447, 449, 451, 453, 454, 456, 458,
+ 460, 462, 464, 466, 468, 470, 472, 473, 475, 477, 479,
+ 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501,
+ 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523,
+ 525, 528, 530, 532, 534, 536, 538, 540, 542, 544, 547,
+ 549, 551, 553, 555, 557, 559, 562, 564, 566, 568, 570,
+ 573, 575, 577, 579, 581, 584, 586, 588, 590, 593, 595,
+ 597, 599, 602, 604, 606, 609, 611, 613, 615, 618, 620,
+ 622, 625, 627, 629, 632, 634, 637, 639, 641, 644, 646,
+ 648, 651, 653, 656, 658, 661, 663, 665, 668, 670, 673,
+ 675, 678, 680, 683, 685, 688, 690, 693, 695, 698, 700,
+ 703, 705, 708, 710, 713, 716, 718, 721, 723, 726, 729,
+ 731, 734, 736, 739, 742, 744, 747, 750, 752, 755, 758,
+ 760, 763, 766, 768, 771, 774, 776, 779, 782, 785, 787,
+ 790, 793, 796, 798, 801, 804, 807, 810, 812, 815, 818,
+ 821, 824, 827, 829, 832, 835, 838, 841, 844, 847, 850,
+ 852, 855, 858, 861, 864, 867, 870, 873, 876, 879, 882,
+ 885, 888, 891, 894, 897, 900, 903, 906, 909, 912, 915,
+ 918, 921, 924, 927, 930, 933, 937, 940, 943, 946, 949,
+ 952, 955, 958, 962, 965, 968, 971, 974, 978, 981, 984,
+ 987, 990, 994, 997, 1000, 1003, 1007, 1010, 1013, 1016, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_34[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 30, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44,
+ 45, 45, 45, 46, 46, 46, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 52, 53,
+ 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 58,
+ 58, 59, 59, 59, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 76, 77, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 92, 92, 93,
+ 93, 94, 95, 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 102, 103, 104, 105, 105, 106, 107, 107,
+ 108, 109, 109, 110, 111, 111, 112, 113, 114, 114, 115,
+ 116, 117, 117, 118, 119, 119, 120, 121, 122, 123, 123,
+ 124, 125, 126, 126, 127, 128, 129, 129, 130, 131, 132,
+ 133, 133, 134, 135, 136, 137, 138, 138, 139, 140, 141,
+ 142, 143, 143, 144, 145, 146, 147, 148, 149, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 207, 208, 209, 210, 211, 212, 213, 214, 215, 217,
+ 218, 219, 220, 221, 222, 223, 225, 226, 227, 228, 229,
+ 231, 232, 233, 234, 235, 237, 238, 239, 240, 241, 243,
+ 244, 245, 246, 248, 249, 250, 251, 253, 254, 255, 256,
+ 258, 259, 260, 262, 263, 264, 266, 267, 268, 269, 271,
+ 272, 273, 275, 276, 278, 279, 280, 282, 283, 284, 286,
+ 287, 288, 290, 291, 293, 294, 296, 297, 298, 300, 301,
+ 303, 304, 306, 307, 308, 310, 311, 313, 314, 316, 317,
+ 319, 320, 322, 323, 325, 326, 328, 329, 331, 332, 334,
+ 335, 337, 339, 340, 342, 343, 345, 346, 348, 350, 351,
+ 353, 354, 356, 358, 359, 361, 363, 364, 366, 367, 369,
+ 371, 372, 374, 376, 377, 379, 381, 383, 384, 386, 388,
+ 389, 391, 393, 395, 396, 398, 400, 402, 403, 405, 407,
+ 409, 410, 412, 414, 416, 418, 419, 421, 423, 425, 427,
+ 429, 430, 432, 434, 436, 438, 440, 442, 443, 445, 447,
+ 449, 451, 453, 455, 457, 459, 461, 463, 464, 466, 468,
+ 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490,
+ 492, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513,
+ 515, 517, 519, 521, 523, 526, 528, 530, 532, 534, 536,
+ 538, 541, 543, 545, 547, 549, 551, 554, 556, 558, 560,
+ 563, 565, 567, 569, 572, 574, 576, 578, 581, 583, 585,
+ 587, 590, 592, 594, 597, 599, 601, 604, 606, 608, 611,
+ 613, 615, 618, 620, 623, 625, 627, 630, 632, 635, 637,
+ 640, 642, 644, 647, 649, 652, 654, 657, 659, 662, 664,
+ 667, 669, 672, 674, 677, 679, 682, 685, 687, 690, 692,
+ 695, 697, 700, 703, 705, 708, 711, 713, 716, 718, 721,
+ 724, 726, 729, 732, 734, 737, 740, 743, 745, 748, 751,
+ 753, 756, 759, 762, 764, 767, 770, 773, 776, 778, 781,
+ 784, 787, 790, 793, 795, 798, 801, 804, 807, 810, 813,
+ 815, 818, 821, 824, 827, 830, 833, 836, 839, 842, 845,
+ 848, 851, 854, 857, 860, 863, 866, 869, 872, 875, 878,
+ 881, 884, 887, 890, 893, 896, 899, 903, 906, 909, 912,
+ 915, 918, 921, 925, 928, 931, 934, 937, 940, 944, 947,
+ 950, 953, 957, 960, 963, 966, 970, 973, 976, 979, 983,
+ 986, 989, 993, 996, 999, 1003, 1006, 1009, 1013, 1016, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_35[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44,
+ 45, 45, 45, 46, 46, 47, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 53, 53,
+ 53, 54, 54, 55, 55, 56, 56, 56, 57, 57, 58,
+ 58, 59, 59, 60, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 75, 75, 76, 76, 77, 77, 78, 78, 79, 79, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 85, 86, 86,
+ 87, 88, 88, 89, 89, 90, 91, 91, 92, 93, 93,
+ 94, 95, 95, 96, 96, 97, 98, 98, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 107, 107, 108,
+ 109, 109, 110, 111, 111, 112, 113, 114, 114, 115, 116,
+ 117, 117, 118, 119, 120, 120, 121, 122, 123, 123, 124,
+ 125, 126, 126, 127, 128, 129, 130, 130, 131, 132, 133,
+ 134, 135, 135, 136, 137, 138, 139, 140, 140, 141, 142,
+ 143, 144, 145, 146, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 204, 205, 206, 207,
+ 208, 209, 210, 211, 213, 214, 215, 216, 217, 218, 219,
+ 221, 222, 223, 224, 225, 227, 228, 229, 230, 231, 233,
+ 234, 235, 236, 237, 239, 240, 241, 242, 244, 245, 246,
+ 247, 249, 250, 251, 253, 254, 255, 256, 258, 259, 260,
+ 262, 263, 264, 266, 267, 268, 270, 271, 272, 274, 275,
+ 277, 278, 279, 281, 282, 284, 285, 286, 288, 289, 291,
+ 292, 293, 295, 296, 298, 299, 301, 302, 304, 305, 307,
+ 308, 310, 311, 313, 314, 316, 317, 319, 320, 322, 323,
+ 325, 326, 328, 329, 331, 332, 334, 336, 337, 339, 340,
+ 342, 344, 345, 347, 348, 350, 352, 353, 355, 357, 358,
+ 360, 362, 363, 365, 367, 368, 370, 372, 373, 375, 377,
+ 378, 380, 382, 384, 385, 387, 389, 391, 392, 394, 396,
+ 398, 400, 401, 403, 405, 407, 409, 410, 412, 414, 416,
+ 418, 420, 421, 423, 425, 427, 429, 431, 433, 435, 436,
+ 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458,
+ 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480,
+ 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 503,
+ 505, 507, 509, 511, 513, 515, 517, 520, 522, 524, 526,
+ 528, 531, 533, 535, 537, 539, 542, 544, 546, 548, 550,
+ 553, 555, 557, 560, 562, 564, 566, 569, 571, 573, 576,
+ 578, 580, 583, 585, 587, 590, 592, 594, 597, 599, 602,
+ 604, 606, 609, 611, 614, 616, 618, 621, 623, 626, 628,
+ 631, 633, 636, 638, 641, 643, 646, 648, 651, 653, 656,
+ 658, 661, 664, 666, 669, 671, 674, 677, 679, 682, 684,
+ 687, 690, 692, 695, 698, 700, 703, 706, 708, 711, 714,
+ 716, 719, 722, 725, 727, 730, 733, 736, 738, 741, 744,
+ 747, 750, 752, 755, 758, 761, 764, 766, 769, 772, 775,
+ 778, 781, 784, 787, 789, 792, 795, 798, 801, 804, 807,
+ 810, 813, 816, 819, 822, 825, 828, 831, 834, 837, 840,
+ 843, 846, 849, 852, 855, 858, 862, 865, 868, 871, 874,
+ 877, 880, 883, 887, 890, 893, 896, 899, 902, 906, 909,
+ 912, 915, 919, 922, 925, 928, 932, 935, 938, 941, 945,
+ 948, 951, 955, 958, 961, 965, 968, 971, 975, 978, 982,
+ 985, 988, 992, 995, 999, 1002, 1006, 1009, 1013, 1016, 1020,
+ 1023,
+};
+
+static const u16 xgamma10_36[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 44,
+ 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 49,
+ 49, 49, 50, 50, 51, 51, 52, 52, 52, 53, 53,
+ 54, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
+ 58, 59, 59, 60, 60, 61, 61, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 76, 76, 77, 77, 78, 78, 79, 79, 80, 81,
+ 81, 82, 82, 83, 83, 84, 85, 85, 86, 86, 87,
+ 88, 88, 89, 90, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 110, 110, 111, 112, 112, 113, 114, 115, 115, 116, 117,
+ 118, 118, 119, 120, 121, 121, 122, 123, 124, 125, 125,
+ 126, 127, 128, 129, 129, 130, 131, 132, 133, 133, 134,
+ 135, 136, 137, 138, 138, 139, 140, 141, 142, 143, 144,
+ 145, 145, 146, 147, 148, 149, 150, 151, 152, 153, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 207, 208, 209, 210,
+ 211, 212, 214, 215, 216, 217, 218, 219, 221, 222, 223,
+ 224, 225, 227, 228, 229, 230, 231, 233, 234, 235, 236,
+ 238, 239, 240, 241, 243, 244, 245, 247, 248, 249, 250,
+ 252, 253, 254, 256, 257, 258, 260, 261, 262, 264, 265,
+ 266, 268, 269, 271, 272, 273, 275, 276, 277, 279, 280,
+ 282, 283, 285, 286, 287, 289, 290, 292, 293, 295, 296,
+ 298, 299, 301, 302, 304, 305, 307, 308, 310, 311, 313,
+ 314, 316, 317, 319, 320, 322, 324, 325, 327, 328, 330,
+ 331, 333, 335, 336, 338, 339, 341, 343, 344, 346, 348,
+ 349, 351, 353, 354, 356, 358, 359, 361, 363, 364, 366,
+ 368, 370, 371, 373, 375, 377, 378, 380, 382, 384, 385,
+ 387, 389, 391, 393, 394, 396, 398, 400, 402, 403, 405,
+ 407, 409, 411, 413, 415, 416, 418, 420, 422, 424, 426,
+ 428, 430, 432, 434, 436, 438, 439, 441, 443, 445, 447,
+ 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 470,
+ 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492,
+ 495, 497, 499, 501, 503, 505, 508, 510, 512, 514, 516,
+ 518, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541,
+ 543, 545, 548, 550, 552, 555, 557, 559, 562, 564, 566,
+ 569, 571, 573, 576, 578, 580, 583, 585, 588, 590, 592,
+ 595, 597, 600, 602, 605, 607, 610, 612, 615, 617, 620,
+ 622, 625, 627, 630, 632, 635, 637, 640, 642, 645, 648,
+ 650, 653, 655, 658, 661, 663, 666, 669, 671, 674, 677,
+ 679, 682, 685, 687, 690, 693, 695, 698, 701, 704, 706,
+ 709, 712, 715, 717, 720, 723, 726, 729, 732, 734, 737,
+ 740, 743, 746, 749, 751, 754, 757, 760, 763, 766, 769,
+ 772, 775, 778, 781, 784, 787, 790, 793, 796, 799, 802,
+ 805, 808, 811, 814, 817, 820, 823, 826, 829, 832, 835,
+ 838, 842, 845, 848, 851, 854, 857, 860, 864, 867, 870,
+ 873, 876, 880, 883, 886, 889, 893, 896, 899, 903, 906,
+ 909, 912, 916, 919, 922, 926, 929, 932, 936, 939, 943,
+ 946, 949, 953, 956, 960, 963, 967, 970, 973, 977, 980,
+ 984, 987, 991, 994, 998, 1002, 1005, 1009, 1012, 1016, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_37[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30,
+ 31, 31, 31, 31, 32, 32, 32, 33, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 41, 42, 42, 42, 43, 43, 44, 44, 44, 45,
+ 45, 45, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
+ 70, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75,
+ 76, 76, 77, 77, 78, 78, 79, 80, 80, 81, 81,
+ 82, 82, 83, 84, 84, 85, 85, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 92, 92, 93, 94, 94, 95,
+ 96, 96, 97, 98, 98, 99, 100, 100, 101, 102, 102,
+ 103, 104, 104, 105, 106, 106, 107, 108, 109, 109, 110,
+ 111, 112, 112, 113, 114, 114, 115, 116, 117, 118, 118,
+ 119, 120, 121, 121, 122, 123, 124, 125, 125, 126, 127,
+ 128, 129, 129, 130, 131, 132, 133, 134, 134, 135, 136,
+ 137, 138, 139, 139, 140, 141, 142, 143, 144, 145, 146,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 197, 198, 199, 200, 201,
+ 202, 203, 204, 206, 207, 208, 209, 210, 211, 213, 214,
+ 215, 216, 217, 218, 220, 221, 222, 223, 225, 226, 227,
+ 228, 229, 231, 232, 233, 234, 236, 237, 238, 240, 241,
+ 242, 243, 245, 246, 247, 249, 250, 251, 253, 254, 255,
+ 257, 258, 259, 261, 262, 263, 265, 266, 268, 269, 270,
+ 272, 273, 275, 276, 277, 279, 280, 282, 283, 285, 286,
+ 288, 289, 291, 292, 294, 295, 297, 298, 300, 301, 303,
+ 304, 306, 307, 309, 310, 312, 313, 315, 316, 318, 320,
+ 321, 323, 324, 326, 328, 329, 331, 332, 334, 336, 337,
+ 339, 341, 342, 344, 346, 347, 349, 351, 352, 354, 356,
+ 358, 359, 361, 363, 364, 366, 368, 370, 372, 373, 375,
+ 377, 379, 380, 382, 384, 386, 388, 389, 391, 393, 395,
+ 397, 399, 401, 402, 404, 406, 408, 410, 412, 414, 416,
+ 418, 420, 421, 423, 425, 427, 429, 431, 433, 435, 437,
+ 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459,
+ 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 483,
+ 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 507,
+ 509, 511, 513, 515, 518, 520, 522, 524, 527, 529, 531,
+ 534, 536, 538, 541, 543, 545, 548, 550, 552, 555, 557,
+ 559, 562, 564, 567, 569, 571, 574, 576, 579, 581, 584,
+ 586, 589, 591, 593, 596, 598, 601, 603, 606, 609, 611,
+ 614, 616, 619, 621, 624, 626, 629, 632, 634, 637, 639,
+ 642, 645, 647, 650, 653, 655, 658, 661, 663, 666, 669,
+ 672, 674, 677, 680, 682, 685, 688, 691, 694, 696, 699,
+ 702, 705, 708, 710, 713, 716, 719, 722, 725, 728, 730,
+ 733, 736, 739, 742, 745, 748, 751, 754, 757, 760, 763,
+ 766, 769, 772, 775, 778, 781, 784, 787, 790, 793, 796,
+ 799, 802, 805, 809, 812, 815, 818, 821, 824, 827, 831,
+ 834, 837, 840, 843, 847, 850, 853, 856, 860, 863, 866,
+ 869, 873, 876, 879, 883, 886, 889, 893, 896, 899, 903,
+ 906, 910, 913, 916, 920, 923, 927, 930, 934, 937, 940,
+ 944, 947, 951, 954, 958, 961, 965, 969, 972, 976, 979,
+ 983, 986, 990, 994, 997, 1001, 1005, 1008, 1012, 1016, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_38[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11,
+ 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 14, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20,
+ 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 31,
+ 31, 31, 31, 32, 32, 32, 33, 33, 33, 33, 34,
+ 34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37,
+ 38, 38, 38, 39, 39, 39, 40, 40, 40, 41, 41,
+ 41, 42, 42, 43, 43, 43, 44, 44, 44, 45, 45,
+ 46, 46, 46, 47, 47, 47, 48, 48, 49, 49, 49,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 60, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
+ 76, 77, 78, 78, 79, 79, 80, 81, 81, 82, 82,
+ 83, 83, 84, 85, 85, 86, 86, 87, 88, 88, 89,
+ 90, 90, 91, 92, 92, 93, 93, 94, 95, 95, 96,
+ 97, 97, 98, 99, 99, 100, 101, 102, 102, 103, 104,
+ 104, 105, 106, 106, 107, 108, 109, 109, 110, 111, 112,
+ 112, 113, 114, 115, 115, 116, 117, 118, 118, 119, 120,
+ 121, 122, 122, 123, 124, 125, 126, 126, 127, 128, 129,
+ 130, 130, 131, 132, 133, 134, 135, 136, 136, 137, 138,
+ 139, 140, 141, 142, 143, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 153, 154, 155, 156, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205,
+ 206, 207, 208, 210, 211, 212, 213, 214, 216, 217, 218,
+ 219, 220, 222, 223, 224, 225, 227, 228, 229, 230, 232,
+ 233, 234, 235, 237, 238, 239, 241, 242, 243, 245, 246,
+ 247, 249, 250, 251, 253, 254, 255, 257, 258, 259, 261,
+ 262, 264, 265, 266, 268, 269, 271, 272, 274, 275, 276,
+ 278, 279, 281, 282, 284, 285, 287, 288, 290, 291, 293,
+ 294, 296, 297, 299, 300, 302, 303, 305, 307, 308, 310,
+ 311, 313, 314, 316, 318, 319, 321, 323, 324, 326, 327,
+ 329, 331, 332, 334, 336, 337, 339, 341, 342, 344, 346,
+ 348, 349, 351, 353, 354, 356, 358, 360, 361, 363, 365,
+ 367, 369, 370, 372, 374, 376, 378, 379, 381, 383, 385,
+ 387, 389, 391, 392, 394, 396, 398, 400, 402, 404, 406,
+ 408, 410, 412, 413, 415, 417, 419, 421, 423, 425, 427,
+ 429, 431, 433, 435, 437, 439, 441, 443, 446, 448, 450,
+ 452, 454, 456, 458, 460, 462, 464, 466, 469, 471, 473,
+ 475, 477, 479, 482, 484, 486, 488, 490, 493, 495, 497,
+ 499, 501, 504, 506, 508, 511, 513, 515, 517, 520, 522,
+ 524, 527, 529, 531, 534, 536, 538, 541, 543, 546, 548,
+ 550, 553, 555, 558, 560, 562, 565, 567, 570, 572, 575,
+ 577, 580, 582, 585, 587, 590, 592, 595, 597, 600, 603,
+ 605, 608, 610, 613, 616, 618, 621, 623, 626, 629, 631,
+ 634, 637, 639, 642, 645, 648, 650, 653, 656, 658, 661,
+ 664, 667, 669, 672, 675, 678, 681, 684, 686, 689, 692,
+ 695, 698, 701, 703, 706, 709, 712, 715, 718, 721, 724,
+ 727, 730, 733, 736, 739, 742, 745, 748, 751, 754, 757,
+ 760, 763, 766, 769, 772, 775, 778, 781, 785, 788, 791,
+ 794, 797, 800, 803, 807, 810, 813, 816, 820, 823, 826,
+ 829, 832, 836, 839, 842, 846, 849, 852, 856, 859, 862,
+ 866, 869, 872, 876, 879, 883, 886, 889, 893, 896, 900,
+ 903, 907, 910, 914, 917, 921, 924, 928, 931, 935, 938,
+ 942, 945, 949, 953, 956, 960, 964, 967, 971, 974, 978,
+ 982, 986, 989, 993, 997, 1000, 1004, 1008, 1012, 1015, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_39[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11,
+ 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 19, 19, 19, 19, 19, 20, 20, 20,
+ 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28,
+ 28, 28, 29, 29, 29, 29, 30, 30, 30, 31, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+ 38, 38, 39, 39, 39, 40, 40, 41, 41, 41, 42,
+ 42, 42, 43, 43, 43, 44, 44, 45, 45, 45, 46,
+ 46, 46, 47, 47, 48, 48, 48, 49, 49, 50, 50,
+ 51, 51, 51, 52, 52, 53, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 57, 58, 58, 59, 59, 60,
+ 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65,
+ 66, 66, 67, 67, 68, 68, 69, 69, 70, 70, 71,
+ 71, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
+ 78, 78, 79, 79, 80, 80, 81, 82, 82, 83, 83,
+ 84, 85, 85, 86, 87, 87, 88, 88, 89, 90, 90,
+ 91, 92, 92, 93, 94, 94, 95, 96, 96, 97, 98,
+ 98, 99, 100, 100, 101, 102, 102, 103, 104, 105, 105,
+ 106, 107, 107, 108, 109, 110, 110, 111, 112, 113, 113,
+ 114, 115, 116, 116, 117, 118, 119, 120, 120, 121, 122,
+ 123, 124, 124, 125, 126, 127, 128, 129, 129, 130, 131,
+ 132, 133, 134, 134, 135, 136, 137, 138, 139, 140, 141,
+ 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+ 173, 174, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 190, 191, 192, 193, 194, 195, 196,
+ 198, 199, 200, 201, 202, 203, 204, 206, 207, 208, 209,
+ 210, 212, 213, 214, 215, 217, 218, 219, 220, 221, 223,
+ 224, 225, 227, 228, 229, 230, 232, 233, 234, 236, 237,
+ 238, 239, 241, 242, 243, 245, 246, 248, 249, 250, 252,
+ 253, 254, 256, 257, 259, 260, 261, 263, 264, 266, 267,
+ 269, 270, 271, 273, 274, 276, 277, 279, 280, 282, 283,
+ 285, 286, 288, 289, 291, 292, 294, 295, 297, 299, 300,
+ 302, 303, 305, 306, 308, 310, 311, 313, 314, 316, 318,
+ 319, 321, 323, 324, 326, 328, 329, 331, 333, 334, 336,
+ 338, 340, 341, 343, 345, 346, 348, 350, 352, 353, 355,
+ 357, 359, 361, 362, 364, 366, 368, 370, 372, 373, 375,
+ 377, 379, 381, 383, 385, 386, 388, 390, 392, 394, 396,
+ 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418,
+ 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440,
+ 442, 444, 446, 448, 451, 453, 455, 457, 459, 461, 463,
+ 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 488,
+ 490, 492, 494, 497, 499, 501, 504, 506, 508, 511, 513,
+ 515, 518, 520, 522, 525, 527, 529, 532, 534, 537, 539,
+ 541, 544, 546, 549, 551, 554, 556, 559, 561, 564, 566,
+ 569, 571, 574, 576, 579, 581, 584, 586, 589, 592, 594,
+ 597, 599, 602, 605, 607, 610, 613, 615, 618, 621, 623,
+ 626, 629, 632, 634, 637, 640, 643, 645, 648, 651, 654,
+ 656, 659, 662, 665, 668, 671, 673, 676, 679, 682, 685,
+ 688, 691, 694, 697, 700, 702, 705, 708, 711, 714, 717,
+ 720, 723, 726, 729, 732, 735, 739, 742, 745, 748, 751,
+ 754, 757, 760, 763, 766, 770, 773, 776, 779, 782, 786,
+ 789, 792, 795, 798, 802, 805, 808, 811, 815, 818, 821,
+ 825, 828, 831, 835, 838, 841, 845, 848, 852, 855, 858,
+ 862, 865, 869, 872, 876, 879, 883, 886, 890, 893, 897,
+ 900, 904, 907, 911, 914, 918, 922, 925, 929, 933, 936,
+ 940, 944, 947, 951, 955, 958, 962, 966, 969, 973, 977,
+ 981, 985, 988, 992, 996, 1000, 1004, 1007, 1011, 1015, 1019,
+ 1023,
+};
+
+static const u16 xgamma10_40[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10,
+ 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+ 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 18, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23,
+ 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25,
+ 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28,
+ 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31,
+ 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 35,
+ 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38,
+ 39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42,
+ 43, 43, 43, 44, 44, 44, 45, 45, 46, 46, 46,
+ 47, 47, 48, 48, 48, 49, 49, 50, 50, 50, 51,
+ 51, 52, 52, 53, 53, 53, 54, 54, 55, 55, 56,
+ 56, 57, 57, 57, 58, 58, 59, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66,
+ 67, 67, 68, 68, 69, 69, 70, 70, 71, 72, 72,
+ 73, 73, 74, 74, 75, 75, 76, 77, 77, 78, 78,
+ 79, 79, 80, 81, 81, 82, 82, 83, 84, 84, 85,
+ 85, 86, 87, 87, 88, 89, 89, 90, 91, 91, 92,
+ 93, 93, 94, 95, 95, 96, 97, 97, 98, 99, 99,
+ 100, 101, 101, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 109, 109, 110, 111, 112, 112, 113, 114, 115, 116,
+ 116, 117, 118, 119, 119, 120, 121, 122, 123, 123, 124,
+ 125, 126, 127, 128, 128, 129, 130, 131, 132, 133, 134,
+ 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+ 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 180, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 192, 193, 194, 195, 196, 197, 199, 200, 201,
+ 202, 203, 204, 206, 207, 208, 209, 210, 212, 213, 214,
+ 215, 217, 218, 219, 220, 222, 223, 224, 226, 227, 228,
+ 229, 231, 232, 233, 235, 236, 237, 239, 240, 241, 243,
+ 244, 245, 247, 248, 250, 251, 252, 254, 255, 257, 258,
+ 259, 261, 262, 264, 265, 267, 268, 270, 271, 273, 274,
+ 276, 277, 279, 280, 282, 283, 285, 286, 288, 289, 291,
+ 292, 294, 296, 297, 299, 300, 302, 304, 305, 307, 308,
+ 310, 312, 313, 315, 317, 318, 320, 322, 323, 325, 327,
+ 328, 330, 332, 333, 335, 337, 339, 340, 342, 344, 346,
+ 348, 349, 351, 353, 355, 357, 358, 360, 362, 364, 366,
+ 368, 369, 371, 373, 375, 377, 379, 381, 383, 385, 386,
+ 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408,
+ 410, 412, 414, 416, 418, 420, 422, 424, 426, 429, 431,
+ 433, 435, 437, 439, 441, 443, 445, 448, 450, 452, 454,
+ 456, 458, 461, 463, 465, 467, 469, 472, 474, 476, 478,
+ 481, 483, 485, 488, 490, 492, 495, 497, 499, 501, 504,
+ 506, 509, 511, 513, 516, 518, 521, 523, 525, 528, 530,
+ 533, 535, 538, 540, 543, 545, 548, 550, 553, 555, 558,
+ 560, 563, 565, 568, 570, 573, 576, 578, 581, 583, 586,
+ 589, 591, 594, 597, 599, 602, 605, 607, 610, 613, 616,
+ 618, 621, 624, 627, 629, 632, 635, 638, 641, 643, 646,
+ 649, 652, 655, 658, 660, 663, 666, 669, 672, 675, 678,
+ 681, 684, 687, 690, 693, 696, 699, 702, 705, 708, 711,
+ 714, 717, 720, 723, 726, 729, 732, 735, 739, 742, 745,
+ 748, 751, 754, 758, 761, 764, 767, 770, 774, 777, 780,
+ 783, 787, 790, 793, 797, 800, 803, 807, 810, 813, 817,
+ 820, 824, 827, 830, 834, 837, 841, 844, 848, 851, 855,
+ 858, 862, 865, 869, 872, 876, 879, 883, 886, 890, 894,
+ 897, 901, 905, 908, 912, 916, 919, 923, 927, 930, 934,
+ 938, 942, 945, 949, 953, 957, 960, 964, 968, 972, 976,
+ 980, 984, 987, 991, 995, 999, 1003, 1007, 1011, 1015, 1019,
+ 1023,
+};
+
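+/*
+ * Table of pointers to the 10-bit curves above; entry i holds the curve
+ * for gamma = (i + 1) / 10, matching the 1-40 range of the V4L2 gamma
+ * controls (see select_gamma() in xilinx-gamma.c).
+ */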
+static const u16 *xgamma10_curves[GAMMA_CURVE_LENGTH] = {
+ &xgamma10_01[0],
+ &xgamma10_02[0],
+ &xgamma10_03[0],
+ &xgamma10_04[0],
+ &xgamma10_05[0],
+ &xgamma10_06[0],
+ &xgamma10_07[0],
+ &xgamma10_08[0],
+ &xgamma10_09[0],
+ &xgamma10_10[0],
+ &xgamma10_11[0],
+ &xgamma10_12[0],
+ &xgamma10_13[0],
+ &xgamma10_14[0],
+ &xgamma10_15[0],
+ &xgamma10_16[0],
+ &xgamma10_17[0],
+ &xgamma10_18[0],
+ &xgamma10_19[0],
+ &xgamma10_20[0],
+ &xgamma10_21[0],
+ &xgamma10_22[0],
+ &xgamma10_23[0],
+ &xgamma10_24[0],
+ &xgamma10_25[0],
+ &xgamma10_26[0],
+ &xgamma10_27[0],
+ &xgamma10_28[0],
+ &xgamma10_29[0],
+ &xgamma10_30[0],
+ &xgamma10_31[0],
+ &xgamma10_32[0],
+ &xgamma10_33[0],
+ &xgamma10_34[0],
+ &xgamma10_35[0],
+ &xgamma10_36[0],
+ &xgamma10_37[0],
+ &xgamma10_38[0],
+ &xgamma10_39[0],
+ &xgamma10_40[0],
+};
+#endif /* __XILINX_GAMMA_COEFF_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-gamma.c b/drivers/media/platform/xilinx/xilinx-gamma.c
new file mode 100644
index 000000000000..d7996d0f34fa
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-gamma.c
@@ -0,0 +1,543 @@
+/*
+ * Xilinx Gamma Correction IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-gamma-coeff.h"
+#include "xilinx-vip.h"
+
+#define XGAMMA_MIN_HEIGHT (64)
+#define XGAMMA_MAX_HEIGHT (4320)
+#define XGAMMA_DEF_HEIGHT (720)
+#define XGAMMA_MIN_WIDTH (64)
+#define XGAMMA_MAX_WIDTH (8192)
+#define XGAMMA_DEF_WIDTH (1280)
+
+#define XGAMMA_AP_CTRL (0x0000)
+#define XGAMMA_GIE (0x0004)
+#define XGAMMA_IER (0x0008)
+#define XGAMMA_ISR (0x000c)
+#define XGAMMA_WIDTH (0x0010)
+#define XGAMMA_HEIGHT (0x0018)
+#define XGAMMA_VIDEO_FORMAT (0x0020)
+#define XGAMMA_GAMMA_LUT_0_BASE (0x0800)
+#define XGAMMA_GAMMA_LUT_1_BASE (0x1000)
+#define XGAMMA_GAMMA_LUT_2_BASE (0x1800)
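+
+/*
+ * The three LUT banks are spaced 0x800 bytes apart, one per colour channel
+ * (red, green, blue); each bank holds up to 2^color_depth 16-bit entries
+ * packed two per 32-bit word.
+ */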
+
+#define XGAMMA_RESET_DEASSERT (0)
+#define XGAMMA_RESET_ASSERT (1)
+#define XGAMMA_START BIT(0)
+#define XGAMMA_AUTO_RESTART BIT(7)
+#define XGAMMA_STREAM_ON (XGAMMA_START | XGAMMA_AUTO_RESTART)
+
+enum xgamma_video_format {
+ XGAMMA_RGB = 0,
+};
+
+/**
+ * struct xgamma_dev - Xilinx Video Gamma LUT device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: sink and source media pads of the Gamma LUT sub-device
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @ctrl_handler: V4L2 Control Handler for R,G,B Gamma Controls
+ * @red_lut: Pointer to the gamma coefficient as per the Red Gamma control
+ * @green_lut: Pointer to the gamma coefficient as per the Green Gamma control
+ * @blue_lut: Pointer to the gamma coefficient as per the Blue Gamma control
+ * @color_depth: Color depth of the Video Gamma IP
+ * @gamma_table: Pointer to the table containing various gamma values
+ * @rst_gpio: GPIO reset line to bring the Gamma LUT IP out of reset
+ * @max_width: Maximum width supported by this instance.
+ * @max_height: Maximum height supported by this instance.
+ */
+struct xgamma_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ const u16 *red_lut;
+ const u16 *green_lut;
+ const u16 *blue_lut;
+ u32 color_depth;
+ const u16 **gamma_table;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+static inline u32 xg_read(struct xgamma_dev *xg, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xg->xvip, reg);
+ dev_dbg(xg->xvip.dev,
+ "Reading 0x%x from reg offset 0x%x", data, reg);
+ return data;
+}
+
+static inline void xg_write(struct xgamma_dev *xg, u32 reg, u32 data)
+{
+ dev_dbg(xg->xvip.dev,
+ "Writing 0x%x to reg offset 0x%x", data, reg);
+ xvip_write(&xg->xvip, reg, data);
+#ifdef DEBUG
+ if (xg_read(xg, reg) != data)
+ dev_err(xg->xvip.dev,
+ "Write 0x%x does not match read back", data);
+#endif
+}
+
+static inline struct xgamma_dev *to_xg(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xgamma_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+__xg_get_pad_format(struct xgamma_dev *xg,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(
+ &xg->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xg->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static void xg_set_lut_entries(struct xgamma_dev *xg,
+ const u16 *lut, const u32 lut_base)
+{
+ int itr;
+ u32 lut_offset, lut_data;
+
+ lut_offset = lut_base;
+ /* Write LUT Entries */
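+ /*
+ * Each 32-bit write packs two consecutive 16-bit gamma entries, so
+ * 2^(color_depth - 1) writes cover the whole table.
+ */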
+ for (itr = 0; itr < BIT(xg->color_depth - 1); itr++) {
+ lut_data = (lut[2 * itr + 1] << 16) | lut[2 * itr];
+ xg_write(xg, lut_offset, lut_data);
+ lut_offset += 4;
+ }
+}
+
+static int xg_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+
+ if (!enable) {
+ dev_dbg(xg->xvip.dev, "%s : Off", __func__);
+ gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_ASSERT);
+ gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_DEASSERT);
+ return 0;
+ }
+ dev_dbg(xg->xvip.dev, "%s : Started", __func__);
+
+ dev_dbg(xg->xvip.dev, "%s : Setting width %d and height %d",
+ __func__, xg->formats[XVIP_PAD_SINK].width,
+ xg->formats[XVIP_PAD_SINK].height);
+ xg_write(xg, XGAMMA_WIDTH, xg->formats[XVIP_PAD_SINK].width);
+ xg_write(xg, XGAMMA_HEIGHT, xg->formats[XVIP_PAD_SINK].height);
+ xg_write(xg, XGAMMA_VIDEO_FORMAT, XGAMMA_RGB);
+ xg_set_lut_entries(xg, xg->red_lut, XGAMMA_GAMMA_LUT_0_BASE);
+ xg_set_lut_entries(xg, xg->green_lut, XGAMMA_GAMMA_LUT_1_BASE);
+ xg_set_lut_entries(xg, xg->blue_lut, XGAMMA_GAMMA_LUT_2_BASE);
+
+ /* Start GAMMA Correction LUT Video IP */
+ xg_write(xg, XGAMMA_AP_CTRL, XGAMMA_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xg_video_ops = {
+ .s_stream = xg_s_stream,
+};
+
+static int xg_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+
+ fmt->format = *__xg_get_pad_format(xg, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xg_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+ struct v4l2_mbus_framefmt *__format;
+
+ __format = __xg_get_pad_format(xg, cfg, fmt->pad, fmt->which);
+ *__format = fmt->format;
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ if (__format->code != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xg->xvip.dev,
+ "Unsupported sink media bus code format");
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ }
+ }
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XGAMMA_MIN_WIDTH, xg->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XGAMMA_MIN_HEIGHT, xg->max_height);
+
+ fmt->format = *__format;
+ /* Propagate to Source Pad */
+ __format = __xg_get_pad_format(xg, cfg, XVIP_PAD_SOURCE, fmt->which);
+ *__format = fmt->format;
+ return 0;
+}
+
+static int xg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xgamma_dev *xg = to_xg(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xg->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xg->default_formats[XVIP_PAD_SOURCE];
+ return 0;
+}
+
+static int xg_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xg_internal_ops = {
+ .open = xg_open,
+ .close = xg_close,
+};
+
+static const struct v4l2_subdev_pad_ops xg_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xg_get_format,
+ .set_fmt = xg_set_format,
+};
+
+static const struct v4l2_subdev_ops xg_ops = {
+ .video = &xg_video_ops,
+ .pad = &xg_pad_ops,
+};
+
+static int
+select_gamma(s32 value, const u16 **coeff, const u16 **xgamma_curves)
+{
+ if (!coeff)
+ return -EINVAL;
+ if (value <= 0 || value > GAMMA_CURVE_LENGTH)
+ return -EINVAL;
+
+ *coeff = *(xgamma_curves + value - 1);
+ return 0;
+}
+
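+/*
+ * The gamma controls carry values 1-40, selecting the curves for gamma 0.1
+ * to 4.0 in 0.1 steps; select_gamma() maps a control value to the matching
+ * table and the new curve is programmed into the hardware immediately.
+ */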
+static int xg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int rval;
+ struct xgamma_dev *xg =
+ container_of(ctrl->handler,
+ struct xgamma_dev, ctrl_handler);
+ dev_dbg(xg->xvip.dev, "%s called", __func__);
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA:
+ rval = select_gamma(ctrl->val, &xg->red_lut, xg->gamma_table);
+ if (rval < 0) {
+ dev_err(xg->xvip.dev, "Invalid Red Gamma");
+ return rval;
+ }
+ dev_dbg(xg->xvip.dev, "%s: Setting Red Gamma to %d.%d",
+ __func__, ctrl->val / 10, ctrl->val % 10);
+ xg_set_lut_entries(xg, xg->red_lut, XGAMMA_GAMMA_LUT_0_BASE);
+ break;
+ case V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA:
+ rval = select_gamma(ctrl->val, &xg->blue_lut, xg->gamma_table);
+ if (rval < 0) {
+ dev_err(xg->xvip.dev, "Invalid Blue Gamma");
+ return rval;
+ }
+ dev_dbg(xg->xvip.dev, "%s: Setting Blue Gamma to %d.%d",
+ __func__, ctrl->val / 10, ctrl->val % 10);
+ xg_set_lut_entries(xg, xg->blue_lut, XGAMMA_GAMMA_LUT_2_BASE);
+ break;
+ case V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA:
+ rval = select_gamma(ctrl->val, &xg->green_lut, xg->gamma_table);
+ if (rval < 0) {
+ dev_err(xg->xvip.dev, "Invalid Green Gamma");
+ return rval;
+ }
+ dev_dbg(xg->xvip.dev, "%s: Setting Green Gamma to %d.%d",
+ __func__, ctrl->val / 10, ctrl->val % 10);
+ xg_set_lut_entries(xg, xg->green_lut, XGAMMA_GAMMA_LUT_1_BASE);
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xg_ctrl_ops = {
+ .s_ctrl = xg_s_ctrl,
+};
+
+static struct v4l2_ctrl_config xg_ctrls[] = {
+ /* Red Gamma */
+ {
+ .ops = &xg_ctrl_ops,
+ .id = V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA,
+ .name = "Red Gamma Correction|1->0.1|10->1.0",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 40,
+ .step = 1,
+ .def = 10,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Blue Gamma */
+ {
+ .ops = &xg_ctrl_ops,
+ .id = V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA,
+ .name = "Blue Gamma Correction|1->0.1|10->1.0",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 40,
+ .step = 1,
+ .def = 10,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Green Gamma */
+ {
+ .ops = &xg_ctrl_ops,
+ .id = V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA,
+ .name = "Green Gamma Correction|1->0.1|10->1.0)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 40,
+ .step = 1,
+ .def = 10,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
+
+static const struct media_entity_operations xg_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xg_parse_of(struct xgamma_dev *xg)
+{
+ struct device *dev = xg->xvip.dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id = 0;
+ int rval;
+
+ rval = of_property_read_u32(node, "xlnx,max-height", &xg->max_height);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xg->max_height > XGAMMA_MAX_HEIGHT ||
+ xg->max_height < XGAMMA_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width", &xg->max_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xg->max_width > XGAMMA_MAX_WIDTH ||
+ xg->max_width < XGAMMA_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
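+ /* With no "ports" hub node, the ports sit directly under the device node. */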
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT");
+ return rval;
+ }
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(port, "xlnx,video-width",
+ &xg->color_depth);
+ if (rval < 0) {
+ dev_err(dev, "Missing xlnx-video-width in DT");
+ return rval;
+ }
+ switch (xg->color_depth) {
+ case GAMMA_BPC_8:
+ xg->gamma_table = xgamma8_curves;
+ break;
+ case GAMMA_BPC_10:
+ xg->gamma_table = xgamma10_curves;
+ break;
+ default:
+ dev_err(dev, "Unsupported color depth %d",
+ xg->color_depth);
+ return -EINVAL;
+ }
+ }
+ }
+
+ xg->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xg->rst_gpio)) {
+ if (PTR_ERR(xg->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xg->rst_gpio);
+ }
+ return 0;
+}
+
+static int xg_probe(struct platform_device *pdev)
+{
+ struct xgamma_dev *xg;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval, itr;
+
+ dev_dbg(&pdev->dev, "Gamma LUT Probe Started");
+ xg = devm_kzalloc(&pdev->dev, sizeof(*xg), GFP_KERNEL);
+ if (!xg)
+ return -ENOMEM;
+ xg->xvip.dev = &pdev->dev;
+ rval = xg_parse_of(xg);
+ if (rval < 0)
+ return rval;
+ rval = xvip_init_resources(&xg->xvip);
+ if (rval < 0)
+ return rval;
+
+ dev_dbg(xg->xvip.dev, "Reset Xilinx Video Gamma Corrrection");
+ gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_DEASSERT);
+
+ /* Init V4L2 subdev */
+ subdev = &xg->xvip.subdev;
+ v4l2_subdev_init(subdev, &xg_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xg_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ def_fmt = &xg->default_formats[XVIP_PAD_SINK];
+ /* GAMMA LUT IP only to be supported for RGB */
+ def_fmt->code = MEDIA_BUS_FMT_RBG888_1X24;
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ def_fmt->width = XGAMMA_DEF_WIDTH;
+ def_fmt->height = XGAMMA_DEF_HEIGHT;
+ xg->formats[XVIP_PAD_SINK] = *def_fmt;
+
+ def_fmt = &xg->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xg->default_formats[XVIP_PAD_SINK];
+ xg->formats[XVIP_PAD_SOURCE] = *def_fmt;
+
+ xg->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xg->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xg_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xg->pads);
+ if (rval < 0)
+ goto media_error;
+
+ /* V4L2 Controls */
+ v4l2_ctrl_handler_init(&xg->ctrl_handler, ARRAY_SIZE(xg_ctrls));
+ for (itr = 0; itr < ARRAY_SIZE(xg_ctrls); itr++) {
+ v4l2_ctrl_new_custom(&xg->ctrl_handler,
+ &xg_ctrls[itr], NULL);
+ }
+ if (xg->ctrl_handler.error) {
+ dev_err(&pdev->dev, "Failed to add V4L2 controls");
+ rval = xg->ctrl_handler.error;
+ goto ctrl_error;
+ }
+ subdev->ctrl_handler = &xg->ctrl_handler;
+ rval = v4l2_ctrl_handler_setup(&xg->ctrl_handler);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "Failed to setup control handler");
+ goto ctrl_error;
+ }
+
+ platform_set_drvdata(pdev, xg);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto ctrl_error;
+ }
+ dev_info(&pdev->dev,
+ "Xilinx %d-bit Video Gamma Correction LUT registered",
+ xg->color_depth);
+ return 0;
+ctrl_error:
+ v4l2_ctrl_handler_free(&xg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xg->xvip);
+ return rval;
+}
+
+static int xg_remove(struct platform_device *pdev)
+{
+ struct xgamma_dev *xg = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xg->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xg->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xg->xvip);
+ return 0;
+}
+
+static const struct of_device_id xg_of_id_table[] = {
+ {.compatible = "xlnx,v-gamma-lut"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xg_of_id_table);
+
+static struct platform_driver xg_driver = {
+ .driver = {
+ .name = "xilinx-gamma-lut",
+ .of_match_table = xg_of_id_table,
+ },
+ .probe = xg_probe,
+ .remove = xg_remove,
+};
+
+module_platform_driver(xg_driver);
+MODULE_DESCRIPTION("Xilinx Video Gamma Correction LUT Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-hls-common.h b/drivers/media/platform/xilinx/xilinx-hls-common.h
new file mode 100644
index 000000000000..8ecc3cfb8a83
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-hls-common.h
@@ -0,0 +1,36 @@
+/*
+ * Xilinx HLS common header
+ *
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Radhey Shyam Pandey <radheys@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_HLS_COMMON_H__
+#define __XILINX_HLS_COMMON_H__
+
+#include <linux/bitops.h>
+
+#define XHLS_DEF_WIDTH 1920
+#define XHLS_DEF_HEIGHT 1080
+
+#define XHLS_REG_CTRL_DONE BIT(1)
+#define XHLS_REG_CTRL_IDLE BIT(2)
+#define XHLS_REG_CTRL_READY BIT(3)
+#define XHLS_REG_CTRL_AUTO_RESTART BIT(7)
+#define XHLS_REG_GIE 0x04
+#define XHLS_REG_GIE_GIE BIT(0)
+#define XHLS_REG_IER 0x08
+#define XHLS_REG_IER_DONE BIT(0)
+#define XHLS_REG_IER_READY BIT(1)
+#define XHLS_REG_ISR 0x0c
+#define XHLS_REG_ISR_DONE BIT(0)
+#define XHLS_REG_ISR_READY BIT(1)
+#define XHLS_REG_ROWS 0x10
+#define XHLS_REG_COLS 0x18
+
+#endif /* __XILINX_HLS_COMMON_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-hls.c b/drivers/media/platform/xilinx/xilinx-hls.c
new file mode 100644
index 000000000000..fc42977440a9
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-hls.c
@@ -0,0 +1,481 @@
+/*
+ * Xilinx HLS Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/xilinx-hls.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-hls-common.h"
+#include "xilinx-vip.h"
+
+/**
+ * struct xhls_device - Xilinx HLS Core device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @compatible: first DT compatible string for the device
+ * @formats: active V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: format information corresponding to the pads' active formats
+ * @model: additional description of IP implementation if available
+ * @ctrl_handler: control handler
+ * @user_mem: user portion of the register space
+ * @user_mem_size: size of the user portion of the register space
+ */
+struct xhls_device {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+
+ const char *compatible;
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *model;
+
+ void __iomem *user_mem;
+ size_t user_mem_size;
+};
+
+static inline struct xhls_device *to_hls(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xhls_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static const struct v4l2_ctrl_config xhls_model_ctrl = {
+ .id = V4L2_CID_XILINX_HLS_MODEL,
+ .name = "HLS Model",
+ .type = V4L2_CTRL_TYPE_STRING,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
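+/*
+ * The read-only "HLS Model" string control reports the first DT compatible
+ * string, so userspace can identify which HLS core sits behind this generic
+ * driver.
+ */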
+static int xhls_create_controls(struct xhls_device *xhls)
+{
+ struct v4l2_ctrl_config model = xhls_model_ctrl;
+ struct v4l2_ctrl *ctrl;
+
+ model.max = strlen(xhls->compatible);
+ model.min = model.max;
+
+ v4l2_ctrl_handler_init(&xhls->ctrl_handler, 1);
+
+ ctrl = v4l2_ctrl_new_custom(&xhls->ctrl_handler, &model, NULL);
+
+ if (xhls->ctrl_handler.error) {
+ dev_err(xhls->xvip.dev, "failed to add controls\n");
+ return xhls->ctrl_handler.error;
+ }
+
+ v4l2_ctrl_s_ctrl_string(ctrl, xhls->compatible);
+
+ xhls->xvip.subdev.ctrl_handler = &xhls->ctrl_handler;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int xhls_user_read(struct xhls_device *xhls,
+ struct xilinx_axi_hls_registers *regs)
+{
+ unsigned int i;
+ u32 offset;
+ u32 value;
+
+ if (regs->num_regs >= xhls->user_mem_size / 4)
+ return -EINVAL;
+
+ for (i = 0; i < regs->num_regs; ++i) {
+ if (copy_from_user(&offset, &regs->regs[i].offset,
+ sizeof(offset)))
+ return -EFAULT;
+
+ if (offset >= xhls->user_mem_size || offset & 3)
+ return -EINVAL;
+
+ value = ioread32(xhls->user_mem + offset);
+
+ if (copy_to_user(&regs->regs[i].value, &value, sizeof(value)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xhls_user_write(struct xhls_device *xhls,
+ struct xilinx_axi_hls_registers *regs)
+{
+ struct xilinx_axi_hls_register reg;
+ unsigned int i;
+
+ if (regs->num_regs >= xhls->user_mem_size / 4)
+ return -EINVAL;
+
+ for (i = 0; i < regs->num_regs; ++i) {
+ if (copy_from_user(&reg, &regs->regs[i], sizeof(reg)))
+ return -EFAULT;
+
+ if (reg.offset >= xhls->user_mem_size || reg.offset & 3)
+ return -EINVAL;
+
+ iowrite32(reg.value, xhls->user_mem + reg.offset);
+ }
+
+ return 0;
+}
+
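+/*
+ * Illustrative (not normative) userspace use of the private ioctls, with a
+ * hypothetical register offset of 0x40 in the user window:
+ *
+ *   struct xilinx_axi_hls_register reg = { .offset = 0x40 };
+ *   struct xilinx_axi_hls_registers regs = { .num_regs = 1, .regs = &reg };
+ *
+ *   ioctl(subdev_fd, XILINX_AXI_HLS_READ, &regs);
+ *
+ * Offsets must be 32-bit aligned and lie inside the user register window.
+ */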
+static long xhls_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+
+ switch (cmd) {
+ case XILINX_AXI_HLS_READ:
+ return xhls_user_read(xhls, arg);
+ case XILINX_AXI_HLS_WRITE:
+ return xhls_user_write(xhls, arg);
+ }
+
+ return -ENOTTY;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xhls_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format = &xhls->formats[XVIP_PAD_SINK];
+
+ if (!enable) {
+ xvip_write(&xhls->xvip, XVIP_CTRL_CONTROL, 0);
+ return 0;
+ }
+
+ xvip_write(&xhls->xvip, XHLS_REG_COLS, format->width);
+ xvip_write(&xhls->xvip, XHLS_REG_ROWS, format->height);
+
+ xvip_write(&xhls->xvip, XVIP_CTRL_CONTROL,
+ XHLS_REG_CTRL_AUTO_RESTART | XVIP_CTRL_CONTROL_SW_ENABLE);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xhls_get_pad_format(struct xhls_device *xhls,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xhls->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xhls->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xhls_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+
+ fmt->format = *__xhls_get_pad_format(xhls, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xhls_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xhls_get_pad_format(xhls, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xhls_get_pad_format(xhls, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int xhls_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xhls->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xhls->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xhls_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops xhls_core_ops = {
+ .ioctl = xhls_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops xhls_video_ops = {
+ .s_stream = xhls_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops xhls_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xhls_get_format,
+ .set_fmt = xhls_set_format,
+};
+
+static const struct v4l2_subdev_ops xhls_ops = {
+ .core = &xhls_core_ops,
+ .video = &xhls_video_ops,
+ .pad = &xhls_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xhls_internal_ops = {
+ .open = xhls_open,
+ .close = xhls_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xhls_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static void xhls_init_formats(struct xhls_device *xhls)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize default and active formats */
+ format = &xhls->default_formats[XVIP_PAD_SINK];
+ format->code = xhls->vip_formats[XVIP_PAD_SINK]->code;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->width = xvip_read(&xhls->xvip, XHLS_REG_COLS);
+ format->height = xvip_read(&xhls->xvip, XHLS_REG_ROWS);
+
+ xhls->formats[XVIP_PAD_SINK] = *format;
+
+ format = &xhls->default_formats[XVIP_PAD_SOURCE];
+ *format = xhls->default_formats[XVIP_PAD_SINK];
+ format->code = xhls->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xhls->formats[XVIP_PAD_SOURCE] = *format;
+}
+
+static int xhls_parse_of(struct xhls_device *xhls)
+{
+ struct device *dev = xhls->xvip.dev;
+ struct device_node *node = xhls->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_string(node, "compatible", &xhls->compatible);
+ if (ret < 0)
+ return -EINVAL;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xhls->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xhls_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xhls_device *xhls;
+ struct resource *mem;
+ int ret;
+
+ xhls = devm_kzalloc(&pdev->dev, sizeof(*xhls), GFP_KERNEL);
+ if (!xhls)
+ return -ENOMEM;
+
+ xhls->xvip.dev = &pdev->dev;
+
+ ret = xhls_parse_of(xhls);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xhls->xvip);
+ if (ret < 0)
+ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ xhls->user_mem = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xhls->user_mem))
+ return PTR_ERR(xhls->user_mem);
+ xhls->user_mem_size = resource_size(mem);
+
+ /* Reset and initialize the core */
+ xvip_reset(&xhls->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xhls->xvip.subdev;
+ v4l2_subdev_init(subdev, &xhls_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xhls_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xhls);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ xhls_init_formats(xhls);
+
+ xhls->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xhls->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xhls_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xhls->pads);
+ if (ret < 0)
+ goto error;
+
+ ret = xhls_create_controls(xhls);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xhls);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(xhls->xvip.dev, "device %s found\n", xhls->compatible);
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xhls->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xhls->xvip);
+ return ret;
+}
+
+static int xhls_remove(struct platform_device *pdev)
+{
+ struct xhls_device *xhls = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xhls->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xhls->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xhls->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xhls_of_id_table[] = {
+ { .compatible = "xlnx,v-hls" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xhls_of_id_table);
+
+static struct platform_driver xhls_driver = {
+ .driver = {
+ .name = "xilinx-hls",
+ .of_match_table = xhls_of_id_table,
+ },
+ .probe = xhls_probe,
+ .remove = xhls_remove,
+};
+
+module_platform_driver(xhls_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx HLS Core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-m2m.c b/drivers/media/platform/xilinx/xilinx-m2m.c
new file mode 100644
index 000000000000..5a932a45e571
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-m2m.c
@@ -0,0 +1,2106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx V4L2 mem2mem driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-vip.h"
+
+#define XVIP_M2M_NAME "xilinx-mem2mem"
+#define XVIP_M2M_DEFAULT_FMT V4L2_PIX_FMT_RGB24
+
+/* Minimum and maximum widths are expressed in bytes */
+#define XVIP_M2M_MIN_WIDTH 1U
+#define XVIP_M2M_MAX_WIDTH 65535U
+#define XVIP_M2M_MIN_HEIGHT 1U
+#define XVIP_M2M_MAX_HEIGHT 8191U
+
+#define XVIP_M2M_DEF_WIDTH 1920
+#define XVIP_M2M_DEF_HEIGHT 1080
+
+#define XVIP_M2M_PAD_SINK 1
+#define XVIP_M2M_PAD_SOURCE 0
+
+/**
+ * struct xvip_graph_entity - Entity in the video graph
+ * @list: list entry in a graph entities list
+ * @node: the entity's DT node
+ * @entity: media entity, from the corresponding V4L2 subdev
+ * @asd: subdev asynchronous registration information
+ * @subdev: V4L2 subdev
+ * @streaming: whether the V4L2 subdev is currently streaming
+ */
+struct xvip_graph_entity {
+ struct list_head list;
+ struct device_node *node;
+ struct media_entity *entity;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+ bool streaming;
+};
+
+/**
+ * struct xvip_pipeline - Xilinx Video IP pipeline structure
+ * @pipe: media pipeline
+ * @lock: protects the pipeline @stream_count
+ * @use_count: number of DMA engines using the pipeline
+ * @stream_count: number of DMA engines currently streaming
+ * @num_dmas: number of DMA engines in the pipeline
+ * @xdev: Composite device the pipe belongs to
+ */
+struct xvip_pipeline {
+ struct media_pipeline pipe;
+
+ /* protects the pipeline @stream_count */
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+
+ unsigned int num_dmas;
+ struct xvip_m2m_dev *xdev;
+};
+
+struct xventity_list {
+ struct list_head list;
+ struct media_entity *entity;
+};
+
+/**
+ * struct xvip_m2m_dev - Xilinx Video mem2mem device structure
+ * @v4l2_dev: V4L2 device
+ * @dev: (OF) device
+ * @media_dev: media device
+ * @notifier: V4L2 asynchronous subdevs notifier
+ * @entities: entities in the graph as a list of xvip_graph_entity
+ * @num_subdevs: number of subdevs in the pipeline
+ * @lock: protects the mem2mem context structure data
+ * @queued_lock: protects the queued vb2_v4l2_buffer data
+ * @dma: Video DMA channels
+ * @m2m_dev: V4L2 mem2mem device structure
+ * @v4l2_caps: V4L2 capabilities of the whole device
+ */
+struct xvip_m2m_dev {
+ struct v4l2_device v4l2_dev;
+ struct device *dev;
+
+ struct media_device media_dev;
+ struct v4l2_async_notifier notifier;
+ struct list_head entities;
+ unsigned int num_subdevs;
+
+ /* Protects the m2m context data */
+ struct mutex lock;
+
+ /* Protects vb2_v4l2_buffer data */
+ spinlock_t queued_lock;
+ struct xvip_m2m_dma *dma;
+ struct v4l2_m2m_dev *m2m_dev;
+ u32 v4l2_caps;
+};
+
+static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
+{
+ return container_of(e->pipe, struct xvip_pipeline, pipe);
+}
+
+/**
+ * struct xvip_m2m_dma - Video DMA channel
+ * @video: V4L2 video device associated with the DMA channel
+ * @xdev: composite mem2mem device the DMA channels belongs to
+ * @chan_tx: DMA engine channel for MEM2DEV transfer
+ * @chan_rx: DMA engine channel for DEV2MEM transfer
+ * @outfmt: active V4L2 OUTPUT port pixel format
+ * @capfmt: active V4L2 CAPTURE port pixel format
+ * @r: crop rectangle parameters
+ * @outinfo: format information corresponding to the active @outfmt
+ * @capinfo: format information corresponding to the active @capfmt
+ * @align: transfer alignment required by the DMA channel (in bytes)
+ * @crop: boolean flag to indicate if crop is requested
+ * @pads: media pads for the video M2M device entity
+ * @pipe: pipeline belonging to the DMA channel
+ */
+struct xvip_m2m_dma {
+ struct video_device video;
+ struct xvip_m2m_dev *xdev;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct v4l2_format outfmt;
+ struct v4l2_format capfmt;
+ struct v4l2_rect r;
+ const struct xvip_video_format *outinfo;
+ const struct xvip_video_format *capinfo;
+ u32 align;
+ bool crop;
+
+ struct media_pad pads[2];
+ struct xvip_pipeline pipe;
+};
+
+/**
+ * struct xvip_m2m_ctx - VIPP mem2mem context
+ * @fh: V4L2 file handler
+ * @xdev: composite mem2mem device the DMA channels belongs to
+ * @xt: dma interleaved template for dma configuration
+ * @sgl: data chunk structure for dma_interleaved_template
+ */
+struct xvip_m2m_ctx {
+ struct v4l2_fh fh;
+ struct xvip_m2m_dev *xdev;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+static inline struct xvip_m2m_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct xvip_m2m_ctx, fh);
+}
+
+static struct v4l2_subdev *
+xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int xvip_dma_verify_format(struct xvip_m2m_dma *dma)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+ int width, height;
+
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (dma->outinfo->code != fmt.format.code)
+ return -EINVAL;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->outfmt.type)) {
+ width = dma->outfmt.fmt.pix_mp.width;
+ height = dma->outfmt.fmt.pix_mp.height;
+ } else {
+ width = dma->outfmt.fmt.pix.width;
+ height = dma->outfmt.fmt.pix.height;
+ }
+
+ if (width != fmt.format.width || height != fmt.format.height)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define to_xvip_dma(vdev) container_of(vdev, struct xvip_m2m_dma, video)
+/* -----------------------------------------------------------------------------
+ * Pipeline Stream Management
+ */
+
+/**
+ * xvip_subdev_set_streaming - Find and update streaming status of subdev
+ * @xdev: Composite video device
+ * @subdev: V4L2 sub-device
+ * @enable: enable/disable streaming status
+ *
+ * Walk the xvip graph entity list to check whether the subdev is present,
+ * return its current streaming status and update it as requested.
+ *
+ * Return: the previous streaming status (true or false) if the subdev is
+ * found; otherwise warn and return false
+ */
+static bool xvip_subdev_set_streaming(struct xvip_m2m_dev *xdev,
+ struct v4l2_subdev *subdev, bool enable)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list)
+ if (entity->node == subdev->dev->of_node) {
+ bool status = entity->streaming;
+
+ entity->streaming = enable;
+ return status;
+ }
+
+ WARN(1, "Should never get here\n");
+ return false;
+}
+
+static int xvip_entity_start_stop(struct xvip_m2m_dev *xdev,
+ struct media_entity *entity, bool start)
+{
+ struct v4l2_subdev *subdev;
+ bool is_streaming;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "%s entity %s\n",
+ start ? "Starting" : "Stopping", entity->name);
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* This is to maintain list of stream on/off devices */
+ is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
+
+ /*
+ * start or stop the subdev only once in case if they are
+ * shared between sub-graphs
+ */
+ if (start && !is_streaming) {
+ /* power-on subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_power on failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ return ret;
+ }
+
+ /* stream-on subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream on failed on subdev\n");
+ v4l2_subdev_call(subdev, core, s_power, 0);
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ }
+ } else if (!start && is_streaming) {
+ /* stream-off subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream off failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 1);
+ }
+
+ /* power-off subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ dev_err(xdev->dev,
+ "s_power off failed on subdev\n");
+ }
+
+ return ret;
+}
+
+/**
+ * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
+ * @xdev: Composite video device
+ * @dma: xvip dma
+ * @start: Start (when true) or stop (when false) the pipeline
+ *
+ * Walk the entity chain starting at @dma and start or stop all of them
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int xvip_pipeline_start_stop(struct xvip_m2m_dev *xdev,
+ struct xvip_m2m_dma *dma, bool start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &dma->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct xventity_list *temp, *_temp;
+ LIST_HEAD(ent_list);
+ int ret = 0;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the subdev nodes */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret)
+ goto error;
+
+ media_graph_walk_start(&graph, entity);
+
+ /* get the list of entities */
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xventity_list *ele;
+
+ /* We want to stream on/off only subdevs */
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ /* Maintain the pipeline sequence in a list */
+ ele = kzalloc(sizeof(*ele), GFP_KERNEL);
+ if (!ele) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ele->entity = entity;
+ list_add(&ele->list, &ent_list);
+ }
+
+ if (start) {
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ /* Enable all subdevs from sink to source */
+ ret = xvip_entity_start_stop(xdev, temp->entity, start);
+ if (ret < 0) {
+ dev_err(xdev->dev, "ret = %d for entity %s\n",
+ ret, temp->entity->name);
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
+ /* Disable all subdevs from source to sink */
+ xvip_entity_start_stop(xdev, temp->entity, start);
+ }
+
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+
+error:
+ mutex_unlock(&mdev->graph_mutex);
+ media_graph_walk_cleanup(&graph);
+ return ret;
+}
+
+/**
+ * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: The pipeline
+ * @on: Turn the stream on when true or off when false
+ *
+ * The pipeline is shared between all DMA engines connected to its input and
+ * output. While the stream state of DMA engines can be controlled
+ * independently, pipelines have a shared stream state that enables or disables
+ * all entities in the pipeline. For this reason the pipeline uses a streaming
+ * counter that tracks the number of DMA engines that have requested the stream
+ * to be enabled. This will walk the graph starting from each DMA and enable or
+ * disable the entities in the path.
+ *
+ * When called with the @on argument set to true, this function starts the
+ * entities that belong to the pipeline (entities already streaming are
+ * skipped) and increments the pipeline streaming count.
+ *
+ * Similarly, when called with the @on argument set to false, this function will
+ * decrement the pipeline streaming count and disable all entities in the
+ * pipeline when the streaming count reaches zero.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. Stopping the pipeline never fails. The pipeline state is
+ * not updated when the operation fails.
+ */
+static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
+{
+ struct xvip_m2m_dev *xdev;
+ struct xvip_m2m_dma *dma;
+ int ret = 0;
+
+ mutex_lock(&pipe->lock);
+ xdev = pipe->xdev;
+ dma = xdev->dma;
+
+ if (on) {
+ ret = xvip_pipeline_start_stop(xdev, dma, true);
+ if (ret < 0)
+ goto done;
+ pipe->stream_count++;
+ } else {
+ if (--pipe->stream_count == 0)
+ xvip_pipeline_start_stop(xdev, dma, false);
+ }
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
+ struct xvip_m2m_dma *start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &start->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ unsigned int num_inputs = 0;
+ unsigned int num_outputs = 0;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the video nodes. */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ if (entity->function != MEDIA_ENT_F_IO_V4L)
+ continue;
+
+ /* An M2M video node both feeds and drains the pipeline. */
+ num_outputs++;
+ num_inputs++;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_graph_walk_cleanup(&graph);
+
+ /* We need at least one DMA to proceed */
+ if (num_outputs == 0 && num_inputs == 0)
+ return -EPIPE;
+
+ pipe->num_dmas = num_inputs + num_outputs;
+ pipe->xdev = start->xdev;
+
+ return 0;
+}
+
+static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ pipe->num_dmas = 0;
+}
+
+/**
+ * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
+ * @pipe: the pipeline
+ *
+ * Decrease the pipeline use count and clean it up if we were the last user.
+ */
+static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ mutex_lock(&pipe->lock);
+
+ /* If we're the last user clean up the pipeline. */
+ if (--pipe->use_count == 0)
+ __xvip_pipeline_cleanup(pipe);
+
+ mutex_unlock(&pipe->lock);
+}
+
+/**
+ * xvip_pipeline_prepare - Prepare the pipeline for streaming
+ * @pipe: the pipeline
+ * @dma: DMA engine at one end of the pipeline
+ *
+ * Validate the pipeline if no user exists yet, otherwise just increase the use
+ * count.
+ *
+ * Return: 0 if successful or -EPIPE if the pipeline is not valid.
+ */
+static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
+ struct xvip_m2m_dma *dma)
+{
+ int ret;
+
+ mutex_lock(&pipe->lock);
+
+ /* If we're the first user validate and initialize the pipeline. */
+ if (pipe->use_count == 0) {
+ ret = xvip_pipeline_validate(pipe, dma);
+ if (ret < 0) {
+ __xvip_pipeline_cleanup(pipe);
+ goto done;
+ }
+ }
+
+ pipe->use_count++;
+ ret = 0;
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
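+/*
+ * No per-transfer work is done on the MEM2DEV side; both buffers are
+ * completed and the m2m job finished from xvip_m2m_dma_callback() below.
+ */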
+static void xvip_m2m_dma_callback_mem2dev(void *data)
+{
+}
+
+static void xvip_m2m_dma_callback(void *data)
+{
+ struct xvip_m2m_ctx *ctx = data;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ spin_lock(&xdev->queued_lock);
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timecode = src_vb->timecode;
+
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(xdev->m2m_dev, ctx->fh.m2m_ctx);
+ spin_unlock(&xdev->queued_lock);
+}
+
+/*
+ * Queue operations
+ */
+
+static int xvip_m2m_queue_setup(struct vb2_queue *vq,
+ u32 *nbuffers, u32 *nplanes,
+ u32 sizes[], struct device *alloc_devs[])
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vq);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct v4l2_format *f;
+ const struct xvip_video_format *info;
+ u32 i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f = &dma->outfmt;
+ info = dma->outinfo;
+ } else {
+ f = &dma->capfmt;
+ info = dma->capinfo;
+ }
+
+ if (*nplanes) {
+ if (*nplanes != f->fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = info->buffers;
+ for (i = 0; i < info->buffers; i++)
+ sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int xvip_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct v4l2_format *f;
+ const struct xvip_video_format *info;
+ u32 i;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f = &dma->outfmt;
+ info = dma->outinfo;
+ } else {
+ f = &dma->capfmt;
+ info = dma->capinfo;
+ }
+
+ for (i = 0; i < info->buffers; i++) {
+ if (vb2_plane_size(vb, i) <
+ f->fmt.pix_mp.plane_fmt[i].sizeimage) {
+ dev_err(ctx->xdev->dev,
+ "insufficient plane size (%u < %u)\n",
+ (u32)vb2_plane_size(vb, i),
+ f->fmt.pix_mp.plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i,
+ f->fmt.pix_mp.plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static void xvip_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void xvip_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
+ struct vb2_v4l2_buffer *vbuf;
+
+ dma->crop = false;
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dmaengine_terminate_sync(dma->chan_tx);
+ else
+ dmaengine_terminate_sync(dma->chan_rx);
+
+ if (ctx->xdev->num_subdevs) {
+ /* Stop the pipeline. */
+ xvip_pipeline_set_stream(pipe, false);
+
+ /* Cleanup the pipeline and mark it as being stopped. */
+ xvip_pipeline_cleanup(pipe);
+ media_pipeline_stop(&dma->video.entity);
+ }
+
+ for (;;) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ return;
+
+ spin_lock(&ctx->xdev->queued_lock);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock(&ctx->xdev->queued_lock);
+ }
+}
+
+static int xvip_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct xvip_pipeline *pipe;
+ int ret;
+
+ if (!xdev->num_subdevs)
+ return 0;
+
+ pipe = dma->video.entity.pipe
+ ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
+
+ ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto error;
+
+ /* Verify that the configured format matches the output of the
+ * connected subdev.
+ */
+ ret = xvip_dma_verify_format(dma);
+ if (ret < 0)
+ goto error_stop;
+
+ ret = xvip_pipeline_prepare(pipe, dma);
+ if (ret < 0)
+ goto error_stop;
+
+ /* Start the pipeline. */
+ ret = xvip_pipeline_set_stream(pipe, true);
+ if (ret < 0)
+ goto error_stop;
+
+ return 0;
+error_stop:
+ media_pipeline_stop(&dma->video.entity);
+
+error:
+ xvip_m2m_stop_streaming(q);
+
+ return ret;
+}
+
+static const struct vb2_ops m2m_vb2_ops = {
+ .queue_setup = xvip_m2m_queue_setup,
+ .buf_prepare = xvip_m2m_buf_prepare,
+ .buf_queue = xvip_m2m_buf_queue,
+ .start_streaming = xvip_m2m_start_streaming,
+ .stop_streaming = xvip_m2m_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int xvip_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &m2m_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = ctx->xdev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &m2m_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = ctx->xdev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ strlcpy(cap->driver, XVIP_M2M_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, XVIP_M2M_NAME, sizeof(cap->card));
+ strlcpy(cap->bus_info, XVIP_M2M_NAME, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int
+xvip_m2m_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ const struct xvip_video_format *fmtinfo;
+ const struct xvip_video_format *fmt;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format v4l_fmt;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ u32 i, fmt_cnt, *fmts;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_rx,
+ &fmt_cnt, &fmts);
+ else
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_tx,
+ &fmt_cnt, &fmts);
+ if (ret)
+ return ret;
+
+ if (f->index >= fmt_cnt)
+ return -EINVAL;
+
+ if (!xdev->num_subdevs) {
+ fmt = xvip_get_format_by_fourcc(fmts[f->index]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
+ strlcpy(f->description, fmt->description,
+ sizeof(f->description));
+ return 0;
+ }
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ /* Establish media pad format */
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE],
+ &v4l_fmt.pad);
+ else
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SINK],
+ &v4l_fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ for (i = 0; i < fmt_cnt; i++) {
+ fmt = xvip_get_format_by_fourcc(fmts[i]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ if (fmt->code == v4l_fmt.format.code)
+ break;
+ }
+
+ if (i >= fmt_cnt)
+ return -EINVAL;
+
+ fmtinfo = xvip_get_format_by_fourcc(fmts[i]);
+ f->pixelformat = fmtinfo->fourcc;
+ strlcpy(f->description, fmtinfo->description, sizeof(f->description));
+
+ return 0;
+}
+
+static int xvip_m2m_get_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f->fmt.pix_mp = dma->outfmt.fmt.pix_mp;
+ else
+ f->fmt.pix_mp = dma->capfmt.fmt.pix_mp;
+
+ return 0;
+}
+
+static int __xvip_m2m_try_fmt(struct xvip_m2m_ctx *ctx, struct v4l2_format *f)
+{
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ const struct xvip_video_format *info;
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+ u32 align, min_width, max_width;
+ u32 bpl, min_bpl, max_bpl;
+ u32 padding_factor_nume, padding_factor_deno;
+ u32 bpl_nume, bpl_deno;
+ u32 i, plane_width, plane_height;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ if (xdev->num_subdevs) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ subdev = xvip_dma_remote_subdev
+ (&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
+ else
+ subdev = xvip_dma_remote_subdev
+ (&dma->pads[XVIP_PAD_SINK], &fmt.pad);
+
+ if (!subdev)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return -EINVAL;
+ }
+
+ pix_mp = &f->fmt.pix_mp;
+ plane_fmt = pix_mp->plane_fmt;
+ info = xvip_get_format_by_fourcc(f->fmt.pix_mp.pixelformat);
+ if (info) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dma->outinfo = info;
+ else
+ dma->capinfo = info;
+ } else {
+ info = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+ }
+
+ if (xdev->num_subdevs) {
+ if (info->code != fmt.format.code ||
+ fmt.format.width != pix_mp->width ||
+ fmt.format.height != pix_mp->height) {
+ dev_err(xdev->dev, "Failed to set format\n");
+ dev_info(xdev->dev,
+ "Requested code = %d, width = %d, height = %d\n",
+ info->code, pix_mp->width, pix_mp->height);
+ dev_info(xdev->dev,
+ "Subdev code = %d, width = %d, height = %d\n",
+ fmt.format.code, fmt.format.width,
+ fmt.format.height);
+ return -EINVAL;
+ }
+ }
+
+ xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
+
+ /*
+ * The V4L2 specification suggests that drivers correct the
+ * format struct if any of the requested dimensions is
+ * unsupported.
+ */
+ align = lcm(dma->align, info->bpp >> 3);
+ min_width = roundup(XVIP_M2M_MIN_WIDTH, align);
+ max_width = rounddown(XVIP_M2M_MAX_WIDTH, align);
+ pix_mp->width = clamp(pix_mp->width, min_width, max_width);
+ pix_mp->height = clamp(pix_mp->height, XVIP_M2M_MIN_HEIGHT,
+ XVIP_M2M_MAX_HEIGHT);
+
+ /*
+ * Clamp the requested bytes per line between the minimum line
+ * size for the format and the maximum supported width, both
+ * rounded to the DMA engine alignment.
+ */
+ max_bpl = rounddown(XVIP_M2M_MAX_WIDTH, align);
+
+ if (info->buffers == 1) {
+ /* Handling contiguous data with mplanes */
+ min_bpl = (pix_mp->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, align);
+ bpl = roundup(plane_fmt[0].bytesperline, align);
+ plane_fmt[0].bytesperline = clamp(bpl, min_bpl, max_bpl);
+
+ if (info->num_planes == 1) {
+ /* Single plane formats */
+ plane_fmt[0].sizeimage = plane_fmt[0].bytesperline *
+ pix_mp->height;
+ } else {
+ /* Multi-plane formats in a contiguous buffer */
+ plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(plane_fmt[0].bytesperline *
+ pix_mp->height *
+ info->bpp, 8);
+ }
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ for (i = 0; i < info->num_planes; i++) {
+ plane_width = pix_mp->width / (i ? info->hsub : 1);
+ plane_height = pix_mp->height / (i ? info->vsub : 1);
+ min_bpl = (plane_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, align);
+ bpl = rounddown(plane_fmt[i].bytesperline, align);
+ plane_fmt[i].bytesperline = clamp(bpl, min_bpl,
+ max_bpl);
+ plane_fmt[i].sizeimage = plane_fmt[i].bytesperline *
+ plane_height;
+ }
+ }
+
+ return 0;
+}
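+
+/*
+ * Worked example for the sizeimage math above, assuming NV12 (bpp = 12,
+ * bpl_factor = 1, two planes in one contiguous buffer): a 1920x1080
+ * request yields bytesperline = 1920 and
+ * sizeimage = DIV_ROUND_UP(1920 * 1080 * 12, 8) = 3110400 bytes, i.e.
+ * the luma plane plus the half-size interleaved chroma plane.
+ */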
+
+static int xvip_m2m_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+
+ return __xvip_m2m_try_fmt(ctx, f);
+}
+
+static int xvip_m2m_set_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->xdev->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = __xvip_m2m_try_fmt(ctx, f);
+ if (ret < 0)
+ return ret;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dma->outfmt.fmt.pix_mp = f->fmt.pix_mp;
+ else
+ dma->capfmt.fmt.pix_mp = f->fmt.pix_mp;
+
+ return 0;
+}
+
+static int
+xvip_m2m_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ int ret = 0;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ ret = -ENOTTY;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = dma->r.width;
+ s->r.height = dma->r.height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+xvip_m2m_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ u32 min_width, max_width;
+ int ret = 0;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ ret = -ENOTTY;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->r.width > dma->outfmt.fmt.pix_mp.width ||
+ s->r.height > dma->outfmt.fmt.pix_mp.height ||
+ s->r.top != 0 || s->r.left != 0)
+ return -EINVAL;
+
+ dma->crop = true;
+ min_width = roundup(XVIP_M2M_MIN_WIDTH, dma->align);
+ max_width = rounddown(XVIP_M2M_MAX_WIDTH, dma->align);
+ dma->r.width = clamp(s->r.width, min_width, max_width);
+ dma->r.height = s->r.height;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
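+
+/*
+ * Illustrative only (not part of this driver): userspace would restrict
+ * the processed source rectangle with something like
+ *
+ * struct v4l2_selection sel = {
+ * .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ * .target = V4L2_SEL_TGT_CROP,
+ * .r = { .left = 0, .top = 0, .width = 1280, .height = 720 },
+ * };
+ * ioctl(fd, VIDIOC_S_SELECTION, &sel);
+ *
+ * Only rectangles anchored at the top-left corner and no larger than the
+ * output format are accepted here.
+ */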
+
+static const struct v4l2_ioctl_ops xvip_m2m_ioctl_ops = {
+ .vidioc_querycap = xvip_dma_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = xvip_m2m_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = xvip_m2m_get_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = xvip_m2m_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = xvip_m2m_set_fmt,
+
+ .vidioc_enum_fmt_vid_out_mplane = xvip_m2m_enum_fmt,
+ .vidioc_g_fmt_vid_out_mplane = xvip_m2m_get_fmt,
+ .vidioc_try_fmt_vid_out_mplane = xvip_m2m_try_fmt,
+ .vidioc_s_fmt_vid_out_mplane = xvip_m2m_set_fmt,
+ .vidioc_s_selection = xvip_m2m_s_selection,
+ .vidioc_g_selection = xvip_m2m_g_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
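+
+/*
+ * Illustrative userspace flow (not part of this driver), assuming the
+ * node registers as /dev/videoN:
+ *
+ * int fd = open("/dev/videoN", O_RDWR);
+ * struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE };
+ * ioctl(fd, VIDIOC_S_FMT, &fmt);
+ * fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ * ioctl(fd, VIDIOC_S_FMT, &fmt);
+ *
+ * followed by VIDIOC_REQBUFS/VIDIOC_QBUF on both queues and
+ * VIDIOC_STREAMON on each; job_ready() lets the framework schedule a run
+ * once both sides have a buffer queued.
+ */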
+
+/*
+ * File operations
+ */
+static int xvip_m2m_open(struct file *file)
+{
+ struct xvip_m2m_dev *xdev = video_drvdata(file);
+ struct xvip_m2m_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->xdev = xdev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(xdev->m2m_dev, ctx,
+ &xvip_m2m_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+ dev_info(xdev->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+ ctx->fh.m2m_ctx);
+ return 0;
+}
+
+static int xvip_m2m_release(struct file *file)
+{
+ struct xvip_m2m_ctx *ctx = file2ctx(file);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return 0;
+}
+
+static u32 xvip_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct xvip_m2m_ctx *ctx = file->private_data;
+ int ret;
+
+ mutex_lock(&ctx->xdev->lock);
+ ret = v4l2_m2m_poll(file, ctx->fh.m2m_ctx, wait);
+ mutex_unlock(&ctx->xdev->lock);
+
+ return ret;
+}
+
+static int xvip_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct xvip_m2m_ctx *ctx = file->private_data;
+
+ return v4l2_m2m_mmap(file, ctx->fh.m2m_ctx, vma);
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+static int xvip_m2m_job_ready(void *priv)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+
+ if ((v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) &&
+ (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0))
+ return 1;
+
+ return 0;
+}
+
+static void xvip_m2m_job_abort(void *priv)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+
+ /* Mark the job finished; already-submitted DMA transactions complete on their own */
+ v4l2_m2m_job_finish(ctx->xdev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void xvip_m2m_prep_submit_dev2mem_desc(struct xvip_m2m_ctx *ctx,
+ struct vb2_v4l2_buffer *dst_buf)
+{
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t p_out;
+ const struct xvip_video_format *info;
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 padding_factor_nume, padding_factor_deno;
+ u32 bpl_nume, bpl_deno;
+ u32 luma_size;
+ u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ enum operation_mode mode = DEFAULT;
+
+ p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ if (!p_out) {
+ dev_err(xdev->dev,
+ "Acquiring kernel pointer to buffer failed\n");
+ return;
+ }
+
+ ctx->xt.dir = DMA_DEV_TO_MEM;
+ ctx->xt.src_sgl = false;
+ ctx->xt.dst_sgl = true;
+ ctx->xt.dst_start = p_out;
+
+ pix_mp = &dma->capfmt.fmt.pix_mp;
+ info = dma->capinfo;
+ xilinx_xdma_set_mode(dma->chan_rx, mode);
+ xilinx_xdma_v4l2_config(dma->chan_rx, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
+
+ ctx->xt.frame_size = info->num_planes;
+ ctx->sgl[0].size = (pix_mp->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ ctx->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline - ctx->sgl[0].size;
+ ctx->xt.numf = pix_mp->height;
+
+ /*
+ * dst_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
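+ * (e.g. for NV12 held in two separate buffers the jump is
+ * chroma_dma_addr - luma_dma_addr - bytesperline * height, as
+ * computed below)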
+ */
+ ctx->sgl[0].src_icg = 0;
+
+ if (info->buffers == 1) {
+ /* Handling contiguous data with mplanes */
+ ctx->sgl[0].dst_icg = 0;
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (info->buffers == 2) {
+ dma_addr_t chroma_cap =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
+ luma_size = pix_mp->plane_fmt[0].bytesperline *
+ ctx->xt.numf;
+ if (chroma_cap > p_out)
+ ctx->sgl[0].dst_icg = chroma_cap - p_out -
+ luma_size;
+ }
+ }
+
+ desc = dmaengine_prep_interleaved_dma(dma->chan_rx, &ctx->xt, flags);
+ if (!desc) {
+ dev_err(xdev->dev, "Failed to prepare DMA rx transfer\n");
+ return;
+ }
+
+ desc->callback = xvip_m2m_dma_callback;
+ desc->callback_param = ctx;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan_rx);
+}
+
+static void xvip_m2m_prep_submit_mem2dev_desc(struct xvip_m2m_ctx *ctx,
+ struct vb2_v4l2_buffer *src_buf)
+{
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t p_in;
+ const struct xvip_video_format *info;
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 padding_factor_nume, padding_factor_deno;
+ u32 bpl_nume, bpl_deno;
+ u32 luma_size;
+ u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ enum operation_mode mode = DEFAULT;
+ u32 bpl, src_width, src_height;
+
+ p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+
+ if (!p_in) {
+ dev_err(xdev->dev,
+ "Acquiring kernel pointer to buffer failed\n");
+ return;
+ }
+
+ ctx->xt.dir = DMA_MEM_TO_DEV;
+ ctx->xt.src_sgl = true;
+ ctx->xt.dst_sgl = false;
+ ctx->xt.src_start = p_in;
+
+ pix_mp = &dma->outfmt.fmt.pix_mp;
+ bpl = pix_mp->plane_fmt[0].bytesperline;
+ if (dma->crop) {
+ src_width = dma->r.width;
+ src_height = dma->r.height;
+ } else {
+ src_width = pix_mp->width;
+ src_height = pix_mp->height;
+ }
+
+ info = dma->outinfo;
+ xilinx_xdma_set_mode(dma->chan_tx, mode);
+ xilinx_xdma_v4l2_config(dma->chan_tx, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
+
+ ctx->xt.frame_size = info->num_planes;
+ ctx->sgl[0].size = (src_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
+ ctx->xt.numf = src_height;
+
+ /*
+ * src_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
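+ * (with cropping enabled it instead skips the uncropped tail of the
+ * source luma plane: bytesperline * (full height - cropped height))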
+ */
+ ctx->sgl[0].dst_icg = 0;
+
+ if (info->buffers == 1) {
+ /* Handling contiguous data with mplanes */
+ ctx->sgl[0].src_icg = 0;
+ if (dma->crop)
+ ctx->sgl[0].src_icg = bpl *
+ (pix_mp->height - src_height);
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (info->buffers == 2) {
+ dma_addr_t chroma_out =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+ luma_size = bpl * ctx->xt.numf;
+ if (chroma_out > p_in)
+ ctx->sgl[0].src_icg = chroma_out - p_in -
+ luma_size;
+ }
+ }
+
+ desc = dmaengine_prep_interleaved_dma(dma->chan_tx, &ctx->xt, flags);
+ if (!desc) {
+ dev_err(xdev->dev, "Failed to prepare DMA tx transfer\n");
+ return;
+ }
+
+ desc->callback = xvip_m2m_dma_callback_mem2dev;
+ desc->callback_param = ctx;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan_tx);
+}
+
+/**
+ * xvip_m2m_device_run - prepare and start a mem2mem transaction
+ *
+ * @priv: Instance private data
+ *
+ * Prepare and submit the DMA descriptors for the next source and
+ * destination buffers. Called by the mem2mem framework when it decides
+ * to schedule this particular instance.
+ */
+static void xvip_m2m_device_run(void *priv)
+{
+ struct xvip_m2m_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Prepare and submit mem2dev transaction */
+ xvip_m2m_prep_submit_mem2dev_desc(ctx, src_buf);
+
+ /* Prepare and submit dev2mem transaction */
+ xvip_m2m_prep_submit_dev2mem_desc(ctx, dst_buf);
+}
+
+static const struct v4l2_file_operations xvip_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = xvip_m2m_open,
+ .release = xvip_m2m_release,
+ .poll = xvip_m2m_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = xvip_m2m_mmap,
+};
+
+static struct video_device xvip_m2m_videodev = {
+ .name = XVIP_M2M_NAME,
+ .fops = &xvip_m2m_fops,
+ .ioctl_ops = &xvip_m2m_ioctl_ops,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+ .vfl_type = VFL_TYPE_GRABBER,
+};
+
+static const struct v4l2_m2m_ops xvip_m2m_ops = {
+ .device_run = xvip_m2m_device_run,
+ .job_ready = xvip_m2m_job_ready,
+ .job_abort = xvip_m2m_job_abort,
+};
+
+static int xvip_m2m_dma_init(struct xvip_m2m_dma *dma)
+{
+ struct xvip_m2m_dev *xdev;
+ struct v4l2_pix_format_mplane *pix_mp;
+ int ret;
+
+ xdev = dma->xdev;
+ mutex_init(&xdev->lock);
+ mutex_init(&dma->pipe.lock);
+ spin_lock_init(&xdev->queued_lock);
+
+ /* Format info on capture port - NV12 is the default format */
+ dma->capinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+ pix_mp = &dma->capfmt.fmt.pix_mp;
+ pix_mp->pixelformat = dma->capinfo->fourcc;
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_M2M_DEF_WIDTH;
+ pix_mp->height = XVIP_M2M_DEF_HEIGHT;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
+ dma->capinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
+ pix_mp->height * dma->capinfo->bpp, 8);
+
+ /* Format info on output port - NV12 is the default format */
+ dma->outinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+ pix_mp = &dma->outfmt.fmt.pix_mp;
+ pix_mp->pixelformat = dma->outinfo->fourcc;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_M2M_DEF_WIDTH;
+ pix_mp->height = XVIP_M2M_DEF_HEIGHT;
+ pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
+ dma->outinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
+ pix_mp->height * dma->outinfo->bpp, 8);
+
+ /* DMA channels for mem2mem */
+ dma->chan_tx = dma_request_chan(xdev->dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ ret = PTR_ERR(dma->chan_tx);
+ if (ret != -EPROBE_DEFER)
+ dev_err(xdev->dev, "mem2mem DMA tx channel not found");
+
+ return ret;
+ }
+
+ dma->chan_rx = dma_request_chan(xdev->dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ ret = PTR_ERR(dma->chan_rx);
+ if (ret != -EPROBE_DEFER)
+ dev_err(xdev->dev, "mem2mem DMA rx channel not found");
+
+ goto tx;
+ }
+
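+ /* copy_align is log2 of the required alignment, e.g. 3 means 8 bytes */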
+ dma->align = BIT(dma->chan_tx->device->copy_align);
+
+ /* Video node */
+ dma->video = xvip_m2m_videodev;
+ dma->video.v4l2_dev = &xdev->v4l2_dev;
+ dma->video.lock = &xdev->lock;
+
+ dma->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ dma->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&dma->video.entity, 2, dma->pads);
+ if (ret < 0)
+ goto tx_rx;
+
+ ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(xdev->dev, "Failed to register mem2mem video device\n");
+ goto tx_rx;
+ }
+
+ video_set_drvdata(&dma->video, dma->xdev);
+ return 0;
+
+tx_rx:
+ dma_release_channel(dma->chan_rx);
+tx:
+ dma_release_channel(dma->chan_tx);
+ return ret;
+}
+
+static void xvip_m2m_dma_deinit(struct xvip_m2m_dma *dma)
+{
+ if (video_is_registered(&dma->video))
+ video_unregister_device(&dma->video);
+
+ mutex_destroy(&dma->pipe.lock);
+ mutex_destroy(&dma->xdev->lock);
+ dma_release_channel(dma->chan_tx);
+ dma_release_channel(dma->chan_rx);
+}
+
+static int xvip_m2m_dma_alloc_init(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_m2m_dma *dma = NULL;
+ int ret;
+
+ dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->xdev = xdev;
+ xdev->dma = dma;
+
+ ret = xvip_m2m_dma_init(xdev->dma);
+ if (ret) {
+ dev_err(xdev->dev, "DMA initialization failed\n");
+ return ret;
+ }
+
+ xdev->v4l2_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+static void xvip_composite_v4l2_cleanup(struct xvip_m2m_dev *xdev)
+{
+ v4l2_device_unregister(&xdev->v4l2_dev);
+ media_device_unregister(&xdev->media_dev);
+ media_device_cleanup(&xdev->media_dev);
+}
+
+static int xvip_composite_v4l2_init(struct xvip_m2m_dev *xdev)
+{
+ int ret;
+
+ xdev->media_dev.dev = xdev->dev;
+ strlcpy(xdev->media_dev.model, "Xilinx Video M2M Composite Device",
+ sizeof(xdev->media_dev.model));
+ xdev->media_dev.hw_revision = 0;
+
+ media_device_init(&xdev->media_dev);
+
+ xdev->v4l2_dev.mdev = &xdev->media_dev;
+ ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ media_device_cleanup(&xdev->media_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct xvip_graph_entity *
+xvip_graph_find_entity(struct xvip_m2m_dev *xdev,
+ const struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node == node)
+ return entity;
+ }
+
+ return NULL;
+}
+
+static int xvip_graph_build_one(struct xvip_m2m_dev *xdev,
+ struct xvip_graph_entity *entity)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct media_entity *local = entity->entity;
+ struct media_entity *remote;
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_fwnode_link link;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ next = of_graph_get_next_endpoint(entity->node, ep);
+ if (!next)
+ break;
+
+ ep = next;
+
+ dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %pOF\n",
+ ep);
+ continue;
+ }
+
+ /* Skip sink ports, they will be processed from the other end of
+ * the link.
+ */
+ if (link.local_port >= local->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u for %pOF\n",
+ link.local_port,
+ to_of_node(link.local_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ local_pad = &local->pads[link.local_port];
+
+ if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+ dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n",
+ to_of_node(link.local_node),
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* Skip DMA engines, they will be processed separately. */
+ if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
+ dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n",
+ to_of_node(link.local_node),
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
+ continue;
+ }
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
+ if (!ent) {
+ dev_err(xdev->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+
+ remote = ent->entity;
+
+ if (link.remote_port >= remote->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+ link.remote_port, to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ remote_pad = &remote->pads[link.remote_port];
+
+ v4l2_fwnode_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+
+ ret = media_create_pad_link(local, local_pad->index,
+ remote, remote_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ local->name, local_pad->index,
+ remote->name, remote_pad->index);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int xvip_graph_parse_one(struct xvip_m2m_dev *xdev,
+ struct device_node *node)
+{
+ struct xvip_graph_entity *entity;
+ struct device_node *remote;
+ struct device_node *ep = NULL;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "parsing node %pOF\n", node);
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ dev_dbg(xdev->dev, "handling endpoint %pOF %s\n",
+ ep, ep->name);
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ ret = -EINVAL;
+ break;
+ }
+ dev_dbg(xdev->dev, "Remote endpoint %pOF %s\n",
+ remote, remote->name);
+
+ /* Skip entities that we have already processed. */
+ if (remote == xdev->dev->of_node ||
+ xvip_graph_find_entity(xdev, remote)) {
+ of_node_put(remote);
+ continue;
+ }
+
+ entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
+ if (!entity) {
+ of_node_put(remote);
+ ret = -ENOMEM;
+ break;
+ }
+
+ entity->node = remote;
+ entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ entity->asd.match.fwnode = of_fwnode_handle(remote);
+ list_add_tail(&entity->list, &xdev->entities);
+ xdev->num_subdevs++;
+ }
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int xvip_graph_parse(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ /*
+ * Walk the links to parse the full graph. Start by parsing the
+ * composite node and then parse entities in turn. The list_for_each
+ * loop will handle entities added at the end of the list while walking
+ * the links.
+ */
+ ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
+ if (ret < 0)
+ return 0;
+
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_parse_one(xdev, entity->node);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int xvip_graph_build_dma(struct xvip_m2m_dev *xdev)
+{
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ struct device_node *node = xdev->dev->of_node;
+ struct media_entity *source;
+ struct media_entity *sink;
+ struct media_pad *source_pad;
+ struct media_pad *sink_pad;
+ struct xvip_graph_entity *ent;
+ struct v4l2_fwnode_link link;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ struct xvip_m2m_dma *dma = xdev->dma;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "creating links for DMA engines\n");
+
+ while (1) {
+ /* Get the next endpoint and parse its link. */
+ next = of_graph_get_next_endpoint(node, ep);
+ if (!next)
+ break;
+
+ ep = next;
+
+ dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+ if (ret < 0) {
+ dev_err(xdev->dev, "failed to parse link for %pOF\n",
+ ep);
+ continue;
+ }
+
+ dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
+ dma->video.name);
+
+ /* Find the remote entity. */
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
+ if (!ent) {
+ dev_err(xdev->dev, "no entity found for %pOF\n",
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -ENODEV;
+ break;
+ }
+ if (link.remote_port >= ent->entity->num_pads) {
+ dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+ link.remote_port,
+ to_of_node(link.remote_node));
+ v4l2_fwnode_put_link(&link);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev_dbg(xdev->dev, "Entity %s %s\n", ent->node->name,
+ ent->node->full_name);
+ dev_dbg(xdev->dev, "port number %u on %pOF\n",
+ link.remote_port, to_of_node(link.remote_node));
+ dev_dbg(xdev->dev, "local port number %u on %pOF\n",
+ link.local_port, to_of_node(link.local_node));
+
+ if (link.local_port == XVIP_PAD_SOURCE) {
+ source = &dma->video.entity;
+ source_pad = &dma->pads[XVIP_PAD_SOURCE];
+ sink = ent->entity;
+ sink_pad = &sink->pads[XVIP_PAD_SINK];
+
+ } else {
+ source = ent->entity;
+ source_pad = &source->pads[XVIP_PAD_SOURCE];
+ sink = &dma->video.entity;
+ sink_pad = &dma->pads[XVIP_PAD_SINK];
+ }
+
+ v4l2_fwnode_put_link(&link);
+
+ /* Create the media link. */
+ dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_create_pad_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(xdev->dev,
+ "failed to create %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct xvip_m2m_dev *xdev =
+ container_of(notifier, struct xvip_m2m_dev, notifier);
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
+
+ /* Create links for every entity. */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = xvip_graph_build_one(xdev, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Create links for DMA channels. */
+ ret = xvip_graph_build_dma(xdev);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
+ if (ret < 0)
+ dev_err(xdev->dev, "failed to register subdev nodes\n");
+
+ return media_device_register(&xdev->media_dev);
+}
+
+static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct xvip_m2m_dev *xdev =
+ container_of(notifier, struct xvip_m2m_dev, notifier);
+ struct xvip_graph_entity *entity;
+
+ /* Locate the entity corresponding to the bound subdev and store the
+ * subdev pointer.
+ */
+ list_for_each_entry(entity, &xdev->entities, list) {
+ if (entity->node != subdev->dev->of_node)
+ continue;
+
+ if (entity->subdev) {
+ dev_err(xdev->dev, "duplicate subdev for node %pOF\n",
+ entity->node);
+ return -EINVAL;
+ }
+
+ dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
+ return 0;
+ }
+
+ dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
+ return -EINVAL;
+}
+
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
+static void xvip_graph_cleanup(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_graph_entity *entityp;
+ struct xvip_graph_entity *entity;
+
+ v4l2_async_notifier_unregister(&xdev->notifier);
+
+ list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
+ of_node_put(entity->node);
+ list_del(&entity->list);
+ }
+}
+
+static int xvip_graph_init(struct xvip_m2m_dev *xdev)
+{
+ struct xvip_graph_entity *entity;
+ int ret;
+
+ /* Init the DMA channels. */
+ ret = xvip_m2m_dma_alloc_init(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "DMA initialization failed\n");
+ goto done;
+ }
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = xvip_graph_parse(xdev);
+ if (ret < 0) {
+ dev_err(xdev->dev, "graph parsing failed\n");
+ goto done;
+ }
+ dev_dbg(xdev->dev, "Number of subdev = %d\n", xdev->num_subdevs);
+
+ if (!xdev->num_subdevs) {
+ dev_err(xdev->dev, "no subdev found in graph\n");
+ goto done;
+ }
+
+ /* Register the subdevices notifier. */
+
+ v4l2_async_notifier_init(&xdev->notifier);
+ list_for_each_entry(entity, &xdev->entities, list) {
+ ret = v4l2_async_notifier_add_subdev(&xdev->notifier, &entity->asd);
+ if (ret)
+ goto done;
+ }
+ xdev->notifier.ops = &xvip_graph_notify_ops;
+
+ ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ if (ret < 0) {
+ dev_err(xdev->dev, "notifier registration failed\n");
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ if (ret < 0)
+ xvip_graph_cleanup(xdev);
+
+ return ret;
+}
+
+static int xvip_composite_remove(struct platform_device *pdev)
+{
+ struct xvip_m2m_dev *xdev = platform_get_drvdata(pdev);
+
+ xvip_graph_cleanup(xdev);
+ xvip_composite_v4l2_cleanup(xdev);
+
+ return 0;
+}
+
+static int xvip_m2m_probe(struct platform_device *pdev)
+{
+ struct xvip_m2m_dev *xdev = NULL;
+ int ret;
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&xdev->entities);
+
+ ret = xvip_composite_v4l2_init(xdev);
+ if (ret)
+ return ret;
+
+ ret = xvip_graph_init(xdev);
+ if (ret < 0)
+ goto error;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+ goto dma_cleanup;
+ }
+
+ platform_set_drvdata(pdev, xdev);
+
+ xdev->m2m_dev = v4l2_m2m_init(&xvip_m2m_ops);
+ if (IS_ERR(xdev->m2m_dev)) {
+ dev_err(xdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(xdev->m2m_dev);
+ goto dma_cleanup;
+ }
+
+ dev_info(xdev->dev, "mem2mem device registered\n");
+ return 0;
+
+dma_cleanup:
+ xvip_m2m_dma_deinit(xdev->dma);
+
+error:
+ v4l2_device_unregister(&xdev->v4l2_dev);
+ return ret;
+}
+
+static int xvip_m2m_remove(struct platform_device *pdev)
+{
+ xvip_composite_remove(pdev);
+ return 0;
+}
+
+static const struct of_device_id xvip_m2m_of_id_table[] = {
+ { .compatible = "xlnx,mem2mem" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvip_m2m_of_id_table);
+
+static struct platform_driver xvip_m2m_driver = {
+ .driver = {
+ .name = XVIP_M2M_NAME,
+ .of_match_table = xvip_m2m_of_id_table,
+ },
+ .probe = xvip_m2m_probe,
+ .remove = xvip_m2m_remove,
+};
+
+module_platform_driver(xvip_m2m_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx V4L2 mem2mem driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h b/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h
new file mode 100644
index 000000000000..65a3482aa249
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Memory-to-Memory Video Multi-Scaler IP
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Suresh Gupta <sureshg@xilinx.com>
+ *
+ * This file contains the coefficients used by the Xilinx
+ * Video Multi Scaler Controller driver (xm2msc)
+ *
+ */
+
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MAX_TAPS (12)
+
+#define XSCALER_TAPS_6 (6)
+#define XSCALER_TAPS_8 (8)
+#define XSCALER_TAPS_10 (10)
+#define XSCALER_TAPS_12 (12)
+
+/* Filter bank ID for various filter tap configurations */
+enum xm2mvsc_filter_bank_id {
+ FILTER_BANK_TAPS_6 = 0,
+ FILTER_BANK_TAPS_8,
+ FILTER_BANK_TAPS_10,
+ FILTER_BANK_TAPS_12,
+};
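+
+/*
+ * Hypothetical helper (not defined by this header) showing how a tap
+ * count maps onto its bank ID, since the banks above are ordered by
+ * increasing taps in steps of two:
+ *
+ * static inline enum xm2mvsc_filter_bank_id taps_to_bank(u32 taps)
+ * {
+ * return (taps - XSCALER_TAPS_6) / 2;
+ * }
+ */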
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const short
+xhsc_coeff_taps6[XSCALER_MAX_PHASES][XSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const short
+xhsc_coeff_taps8[XSCALER_MAX_PHASES][XSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const short
+xhsc_coeff_taps10[XSCALER_MAX_PHASES][XSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const short
+xhsc_coeff_taps12[XSCALER_MAX_PHASES][XSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const short
+xvsc_coeff_taps6[XSCALER_MAX_PHASES][XSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const short
+xvsc_coeff_taps8[XSCALER_MAX_PHASES][XSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const short
+xvsc_coeff_taps10[XSCALER_MAX_PHASES][XSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const short
+xvsc_coeff_taps12[XSCALER_MAX_PHASES][XSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
diff --git a/drivers/media/platform/xilinx/xilinx-multi-scaler.c b/drivers/media/platform/xilinx/xilinx-multi-scaler.c
new file mode 100644
index 000000000000..2892ed5c223a
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-multi-scaler.c
@@ -0,0 +1,2449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Memory-to-Memory Video Multi-Scaler IP
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Suresh Gupta <suresh.gupta@xilinx.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ *
+ * This driver adds support to control the Xilinx Video Multi
+ * Scaler Controller
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-multi-scaler-coeff.h"
+
+/* 0x0000 : Control signals */
+#define XM2MSC_AP_CTRL 0x0000
+#define XM2MSC_AP_CTRL_START BIT(0)
+#define XM2MSC_AP_CTRL_DONE BIT(1)
+#define XM2MSC_AP_CTRL_IDLE BIT(2)
+#define XM2MSC_AP_CTRL_READY BIT(3)
+#define XM2MSC_AP_CTRL_AUTO_RESTART BIT(7)
+
+/* 0x0004 : Global Interrupt Enable Register */
+#define XM2MSC_GIE 0x0004
+#define XM2MSC_GIE_EN BIT(0)
+
+/* 0x0008 : IP Interrupt Enable Register (Read/Write) */
+#define XM2MSC_IER 0x0008
+#define XM2MSC_ISR 0x000c
+#define XM2MSC_ISR_DONE BIT(0)
+#define XM2MSC_ISR_READY BIT(1)
+
+#define XM2MSC_NUM_OUTS 0x0010
+
+#define XM2MSC_WIDTHIN 0x000
+#define XM2MSC_WIDTHOUT 0x008
+#define XM2MSC_HEIGHTIN 0x010
+#define XM2MSC_HEIGHTOUT 0x018
+#define XM2MSC_LINERATE 0x020
+#define XM2MSC_PIXELRATE 0x028
+#define XM2MSC_INPIXELFMT 0x030
+#define XM2MSC_OUTPIXELFMT 0x038
+#define XM2MSC_INSTRIDE 0x050
+#define XM2MSC_OUTSTRIDE 0x058
+#define XM2MSC_SRCIMGBUF0 0x060
+#define XM2MSC_SRCIMGBUF1 0x070
+#define XM2MSC_DSTIMGBUF0 0x090
+#define XM2MSC_DSTIMGBUF1 0x0100
+
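+/*
+ * Per-channel scaler coefficient banks: each channel owns a 0x2000
+ * byte window, with the vertical filter coefficients at the base of
+ * the window and the horizontal ones at offset 0x800 within it.
+ */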
+#define XM2MVSC_VFLTCOEFF_L 0x2000
+#define XM2MVSC_VFLTCOEFF(x) (XM2MVSC_VFLTCOEFF_L + 0x2000 * (x))
+#define XM2MVSC_HFLTCOEFF_L 0x2800
+#define XM2MVSC_HFLTCOEFF(x) (XM2MVSC_HFLTCOEFF_L + 0x2000 * (x))
+
+#define XM2MSC_CHAN_REGS_START(x) (0x100 + 0x200 * (x))
+
+/*
+ * IP has reserved area between XM2MSC_DSTIMGBUF0 and
+ * XM2MSC_DSTIMGBUF1 registers of channel 4
+ */
+#define XM2MSC_RESERVED_AREA 0x600
+
+/* GPIO RESET MACROS */
+#define XM2MSC_RESET_ASSERT (0x1)
+#define XM2MSC_RESET_DEASSERT (0x0)
+
+#define XM2MSC_MIN_CHAN 1
+#define XM2MSC_MAX_CHAN 8
+
+#define XM2MSC_MAX_WIDTH (8192)
+#define XM2MSC_MAX_HEIGHT (4320)
+#define XM2MSC_MIN_WIDTH (64)
+#define XM2MSC_MIN_HEIGHT (64)
+#define XM2MSC_STEP_PRECISION (65536)
+/* Mask definitions for Low 16 bits in a 32 bit number */
+#define XM2MSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XM2MSC_BITSHIFT_16 (16)
+
+#define XM2MSC_DRIVER_NAME "xm2msc"
+
+#define CHAN_ATTACHED BIT(0)
+#define CHAN_OPENED BIT(1)
+
+#define XM2MSC_CHAN_OUT 0
+#define XM2MSC_CHAN_CAP 1
+
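+/*
+ * NUM_STREAM() evaluates to the number of channels streaming on both
+ * the OUTPUT and CAPTURE sides. Channels stream in order from bit 0,
+ * so ffz() (find first zero bit) on each bitmap gives the count of
+ * contiguously streamed channels; the smaller of the two wins.
+ */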
+#define NUM_STREAM(_x) \
+ ({ typeof(_x) (x) = (_x); \
+ min(ffz(x->out_streamed_chan), \
+ ffz(x->cap_streamed_chan)); })
+
+#define XM2MSC_ALIGN_MUL 8
+
+/*
+ * These are temporary variables. Once stride and height alignment
+ * support is added to the plugin, these variables will be removed.
+ */
+static unsigned int output_stride_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(output_stride_align, uint, NULL, 0644);
+MODULE_PARM_DESC(output_stride_align,
+ "Per channel stride alignment required at output.");
+
+static unsigned int capture_stride_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(capture_stride_align, uint, NULL, 0644);
+MODULE_PARM_DESC(capture_stride_align,
+ "Per channel stride alignment required at capture.");
+
+static unsigned int output_height_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(output_height_align, uint, NULL, 0644);
+MODULE_PARM_DESC(output_height_align,
+ "Per channel height alignment required at output.");
+
+static unsigned int capture_height_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(capture_height_align, uint, NULL, 0644);
+MODULE_PARM_DESC(capture_height_align,
+ "Per channel height alignment required at capture.");
+
+/* Xilinx Video Specific Color/Pixel Formats */
+enum xm2msc_pix_fmt {
+ XILINX_M2MSC_FMT_RGBX8 = 10,
+ XILINX_M2MSC_FMT_YUVX8 = 11,
+ XILINX_M2MSC_FMT_YUYV8 = 12,
+ XILINX_M2MSC_FMT_RGBX10 = 15,
+ XILINX_M2MSC_FMT_YUVX10 = 16,
+ XILINX_M2MSC_FMT_Y_UV8 = 18,
+ XILINX_M2MSC_FMT_Y_UV8_420 = 19,
+ XILINX_M2MSC_FMT_RGB8 = 20,
+ XILINX_M2MSC_FMT_YUV8 = 21,
+ XILINX_M2MSC_FMT_Y_UV10 = 22,
+ XILINX_M2MSC_FMT_Y_UV10_420 = 23,
+ XILINX_M2MSC_FMT_Y8 = 24,
+ XILINX_M2MSC_FMT_Y10 = 25,
+ XILINX_M2MSC_FMT_BGRX8 = 27,
+ XILINX_M2MSC_FMT_UYVY8 = 28,
+ XILINX_M2MSC_FMT_BGR8 = 29,
+};
+
+/**
+ * struct xm2msc_fmt - driver info for each of the supported video formats
+ * @name: human-readable device tree name for this entry
+ * @fourcc: standard format identifier
+ * @xm2msc_fmt: Xilinx Video Specific Color/Pixel Formats
+ * @num_buffs: number of physically non-contiguous data planes/buffs
+ */
+struct xm2msc_fmt {
+ char *name;
+ u32 fourcc;
+ enum xm2msc_pix_fmt xm2msc_fmt;
+ u32 num_buffs;
+};
+
+static const struct xm2msc_fmt formats[] = {
+ {
+ .name = "xbgr8888",
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xvuy8888",
+ .fourcc = V4L2_PIX_FMT_XVUY32,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "yuyv",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUYV8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xbgr2101010",
+ .fourcc = V4L2_PIX_FMT_XBGR30,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_RGBX10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "yuvx2101010",
+ .fourcc = V4L2_PIX_FMT_XVUY10,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUVX10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "nv16",
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
+ .num_buffs = 2,
+ },
+ {
+ .name = "nv16",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "nv12",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
+ .num_buffs = 2,
+ },
+ {
+ .name = "nv12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
+ .num_buffs = 1,
+ },
+ {
+ .name = "bgr888",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_RGB8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "vuy888",
+ .fourcc = V4L2_PIX_FMT_VUY24,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_YUV8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xv20",
+ .fourcc = V4L2_PIX_FMT_XV20M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
+ .num_buffs = 2,
+ },
+ {
+ .name = "xv20",
+ .fourcc = V4L2_PIX_FMT_XV20,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xv15",
+ .fourcc = V4L2_PIX_FMT_XV15M,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
+ .num_buffs = 2,
+ },
+ {
+ .name = "xv15",
+ .fourcc = V4L2_PIX_FMT_XV15,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
+ .num_buffs = 1,
+ },
+ {
+ .name = "y8",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "y10",
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_Y10,
+ .num_buffs = 1,
+ },
+ {
+ .name = "xrgb8888",
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_BGRX8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "uyvy",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_UYVY8,
+ .num_buffs = 1,
+ },
+ {
+ .name = "rgb888",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .xm2msc_fmt = XILINX_M2MSC_FMT_BGR8,
+ .num_buffs = 1,
+ },
+};
+
+/**
+ * struct xm2msc_q_data - Per-queue, driver-specific private data
+ * There is one source queue and one destination queue for each m2m context.
+ * @width: frame width
+ * @height: frame height
+ * @stride: bytes per line
+ * @nbuffs: current number of buffers
+ * @bytesperline: bytes per line per plane
+ * @sizeimage: image size per plane
+ * @colorspace: supported colorspace
+ * @field: supported field value
+ * @fmt: format info
+ */
+struct xm2msc_q_data {
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int nbuffs;
+ unsigned int bytesperline[2];
+ unsigned int sizeimage[2];
+ enum v4l2_colorspace colorspace;
+ enum v4l2_field field;
+ const struct xm2msc_fmt *fmt;
+};
+
+/**
+ * struct xm2msc_chan_ctx - Scaler Channel Info, Per-Channel context
+ * @regs: IO mapped base address of the Channel
+ * @xm2msc_dev: Pointer to struct xm2m_msc_dev
+ * @num: HW Scaling Channel number
+ * @minor: Minor number of the video device
+ * @output_stride_align: required stride alignment at the output pad
+ * @capture_stride_align: required stride alignment at the capture pad
+ * @output_height_align: required height alignment at the output pad
+ * @capture_height_align: required height alignment at the capture pad
+ * @status: channel status, CHAN_ATTACHED or CHAN_OPENED
+ * @frames: number of frames processed
+ * @vfd: V4L2 device
+ * @fh: v4l2 file handle
+ * @m2m_dev: m2m device
+ * @m2m_ctx: memory to memory context structure
+ * @q_data: src & dst queue data
+ */
+struct xm2msc_chan_ctx {
+ void __iomem *regs;
+ struct xm2m_msc_dev *xm2msc_dev;
+ u32 num;
+ u32 minor;
+ u32 output_stride_align;
+ u32 capture_stride_align;
+ u32 output_height_align;
+ u32 capture_height_align;
+ u8 status;
+ unsigned long frames;
+
+ struct video_device vfd;
+ struct v4l2_fh fh;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+
+ struct xm2msc_q_data q_data[2];
+};
+
+/**
+ * struct xm2m_msc_dev - Xilinx M2M Multi-scaler Device
+ * @dev: pointer to struct device instance used by the driver
+ * @regs: IO mapped base address of the HW/IP
+ * @irq: interrupt number
+ * @clk: video core clock
+ * @max_chan: maximum number of Scaling Channels
+ * @max_ht: maximum number of rows in a plane
+ * @max_wd: maximum number of columns in a plane
+ * @taps: number of taps set in HW
+ * @supported_fmt: bitmap for all supported fmts by HW
+ * @dma_addr_size: Size of dma address pointer in IP (either 32 or 64)
+ * @ppc: Pixels per clock set in IP (1, 2 or 4)
+ * @rst_gpio: reset gpio handler
+ * @opened_chan: bitmap of all opened channels
+ * @out_streamed_chan: bitmap of all output-streamed channels
+ * @cap_streamed_chan: bitmap of all capture-streamed channels
+ * @running_chan: currently running channels
+ * @device_busy: whether the HW device is busy
+ * @isr_wait: flag set while waiting for the ISR to complete
+ * @isr_finished: wait queue used to wait for the IP to complete processing
+ * @v4l2_dev: main struct for the V4L2 device
+ * @dev_mutex: lock for V4L2 device
+ * @mutex: lock for channel ctx
+ * @lock: lock used in IRQ
+ * @xm2msc_chan: array of channel contexts
+ * @hscaler_coeff: Array of filter coefficients for the Horizontal Scaler
+ * @vscaler_coeff: Array of filter coefficients for the Vertical Scaler
+ */
+struct xm2m_msc_dev {
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ u32 max_chan;
+ u32 max_ht;
+ u32 max_wd;
+ u32 taps;
+ u32 supported_fmt;
+ u32 dma_addr_size;
+ u8 ppc;
+ struct gpio_desc *rst_gpio;
+
+ u32 opened_chan;
+ u32 out_streamed_chan;
+ u32 cap_streamed_chan;
+ u32 running_chan;
+ bool device_busy;
+ bool isr_wait;
+ wait_queue_head_t isr_finished;
+
+ struct v4l2_device v4l2_dev;
+
+ struct mutex dev_mutex; /*the mutex for v4l2*/
+ struct mutex mutex; /*lock for bitmap reg*/
+ spinlock_t lock; /*IRQ lock*/
+
+ struct xm2msc_chan_ctx xm2msc_chan[XM2MSC_MAX_CHAN];
+ short hscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
+ short vscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
+};
+
+#define fh_to_chanctx(__fh) container_of(__fh, struct xm2msc_chan_ctx, fh)
+
+static inline u32 xm2msc_readreg(const void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void xm2msc_write64reg(void __iomem *addr, u64 value)
+{
+ iowrite32(lower_32_bits(value), addr);
+ iowrite32(upper_32_bits(value), addr + 4);
+}
+
+static inline void xm2msc_writereg(void __iomem *addr, u32 value)
+{
+ iowrite32(value, addr);
+}
+
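+/*
+ * Semi-planar YUV formats that carry both planes in a single
+ * contiguous buffer; for these the chroma address is derived from
+ * the luma address plus the (height-aligned) size of the first plane.
+ */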
+static bool xm2msc_is_yuv_singlebuff(u32 fourcc)
+{
+ if (fourcc == V4L2_PIX_FMT_NV12 || fourcc == V4L2_PIX_FMT_XV15 ||
+ fourcc == V4L2_PIX_FMT_NV16 || fourcc == V4L2_PIX_FMT_XV20)
+ return true;
+
+ return false;
+}
+
+static inline u32 xm2msc_yuv_1stplane_size(struct xm2msc_q_data *q_data,
+ u32 row_align)
+{
+ return q_data->bytesperline[0] * ALIGN(q_data->height, row_align);
+}
+
+static struct xm2msc_q_data *get_q_data(struct xm2msc_chan_ctx *chan_ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+ default:
+ v4l2_err(&chan_ctx->xm2msc_dev->v4l2_dev,
+ "Not supported Q type %d\n", type);
+ }
+ return NULL;
+}
+
+static u32 find_format_index(struct v4l2_format *f)
+{
+ const struct xm2msc_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ fmt = &formats[i];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ break;
+ }
+
+ return i;
+}
+
+static const struct xm2msc_fmt *find_format(struct v4l2_format *f)
+{
+ const struct xm2msc_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ fmt = &formats[i];
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ return NULL;
+
+ return &formats[i];
+}
+
+static void
+xm2msc_hscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XSCALER_MAX_TAPS - ntaps;
+ offset = pad >> 1;
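+ /*
+ * e.g. with 6 effective taps in a 12-tap table, pad = 6 and
+ * offset = 3, so the coefficients land in entries 3..8 of each
+ * phase and the outer entries stay zero.
+ */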
+
+ memset(xm2msc->hscaler_coeff, 0, sizeof(xm2msc->hscaler_coeff));
+
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xm2msc->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+}
+
+static void xm2msc_hscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
+ const u32 base_addr)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int val, offset, rd_indx;
+ unsigned int i, j;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ offset = (XSCALER_MAX_TAPS - ntaps) / 2;
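+ /* Pack two signed 16-bit coefficients into each 32-bit write. */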
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xm2msc->hscaler_coeff[i][rd_indx + 1] <<
+ XM2MSC_BITSHIFT_16) |
+ (xm2msc->hscaler_coeff[i][rd_indx] &
+ XM2MSC_MASK_LOW_16BITS);
+ xm2msc_writereg((xm2msc->regs + base_addr) +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static void
+xm2msc_vscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
+ const short *coeff, const u32 ntaps)
+{
+ unsigned int i, j;
+ int pad, offset;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XSCALER_MAX_TAPS - ntaps;
+ offset = pad >> 1;
+
+ /* Zero Entire Array */
+ memset(xm2msc->vscaler_coeff, 0, sizeof(xm2msc->vscaler_coeff));
+
+ /* Load User defined coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xm2msc->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+}
+
+static void
+xm2msc_vscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
+ const u32 base_addr)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ u32 val, i, j, offset, rd_indx;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+ const u32 nphases = XSCALER_MAX_PHASES;
+
+ offset = (XSCALER_MAX_TAPS - ntaps) / 2;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xm2msc->vscaler_coeff[i][rd_indx + 1] <<
+ XM2MSC_BITSHIFT_16) |
+ (xm2msc->vscaler_coeff[i][rd_indx] &
+ XM2MSC_MASK_LOW_16BITS);
+ xm2msc_writereg((xm2msc->regs +
+ base_addr) + ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static u32
+xm2msc_select_hcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
+{
+ u16 hscale_ratio;
+ u32 width_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].width;
+ u32 width_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].width;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+
+ if (width_out < width_in) {
+ hscale_ratio = (width_in * 10) / width_out;
+
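+ /*
+ * hscale_ratio holds the downscale ratio in tenths, so the
+ * 15/25/35 thresholds below correspond to 1.5x/2.5x/3.5x;
+ * larger downscale ratios use filters with more taps.
+ */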
+ switch (chan_ctx->xm2msc_dev->taps) {
+ case XSCALER_TAPS_12:
+ if (hscale_ratio > 35) {
+ *coeff = &xhsc_coeff_taps12[0][0];
+ ntaps = XSCALER_TAPS_12;
+ } else if (hscale_ratio > 25) {
+ *coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ *coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_10:
+ if (hscale_ratio > 25) {
+ *coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ *coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_8:
+ if (hscale_ratio > 15) {
+ *coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ default: /* or XSCALER_TAPS_6 */
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ } else {
+ /*
+ * Scale Up Mode will always use 6 tap filter
+ * This also includes 1:1
+ */
+ *coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+
+ return ntaps;
+}
+
+static u32
+xm2msc_select_vcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
+{
+ u16 vscale_ratio;
+ u32 height_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].height;
+ u32 height_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].height;
+ u32 ntaps = chan_ctx->xm2msc_dev->taps;
+
+ if (height_out < height_in) {
+ vscale_ratio = (height_in * 10) / height_out;
+
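+ /*
+ * vscale_ratio holds the downscale ratio in tenths; the
+ * taps selection mirrors xm2msc_select_hcoeff().
+ */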
+ switch (chan_ctx->xm2msc_dev->taps) {
+ case XSCALER_TAPS_12:
+ if (vscale_ratio > 35) {
+ *coeff = &xvsc_coeff_taps12[0][0];
+ ntaps = XSCALER_TAPS_12;
+ } else if (vscale_ratio > 25) {
+ *coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ *coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_10:
+ if (vscale_ratio > 25) {
+ *coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ *coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ case XSCALER_TAPS_8:
+ if (vscale_ratio > 15) {
+ *coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XSCALER_TAPS_8;
+ } else {
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ break;
+ default: /* or XSCALER_TAPS_6 */
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+ } else {
+ /*
+ * Scale Up Mode will always use 6 tap filter
+ * This also includes 1:1
+ */
+ *coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XSCALER_TAPS_6;
+ }
+
+ return ntaps;
+}
+
+static void xm2mvsc_initialize_coeff_banks(struct xm2msc_chan_ctx *chan_ctx)
+{
+ const short *coeff = NULL;
+ u32 ntaps;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ ntaps = xm2msc_select_hcoeff(chan_ctx, &coeff);
+ xm2msc_hscaler_load_ext_coeff(xm2msc, coeff, ntaps);
+ xm2msc_hscaler_set_coeff(chan_ctx, XM2MVSC_HFLTCOEFF(chan_ctx->num));
+
+ dev_dbg(xm2msc->dev, "htaps %d selected for chan %d\n",
+ ntaps, chan_ctx->num);
+
+ ntaps = xm2msc_select_vcoeff(chan_ctx, &coeff);
+ xm2msc_vscaler_load_ext_coeff(xm2msc, coeff, ntaps);
+ xm2msc_vscaler_set_coeff(chan_ctx, XM2MVSC_VFLTCOEFF(chan_ctx->num));
+
+ dev_dbg(xm2msc->dev, "vtaps %d selected for chan %d\n",
+ ntaps, chan_ctx->num);
+}
+
+static void xm2msc_set_chan_params(struct xm2msc_chan_ctx *chan_ctx,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_q_data *q_data = get_q_data(chan_ctx, type);
+ const struct xm2msc_fmt *fmt = q_data->fmt;
+ void __iomem *base = chan_ctx->regs;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ xm2msc_writereg(base + XM2MSC_WIDTHIN, q_data->width);
+ xm2msc_writereg(base + XM2MSC_HEIGHTIN, q_data->height);
+ xm2msc_writereg(base + XM2MSC_INPIXELFMT, fmt->xm2msc_fmt);
+ xm2msc_writereg(base + XM2MSC_INSTRIDE, q_data->stride);
+ } else {
+ xm2msc_writereg(base + XM2MSC_WIDTHOUT, q_data->width);
+ xm2msc_writereg(base + XM2MSC_HEIGHTOUT, q_data->height);
+ xm2msc_writereg(base + XM2MSC_OUTPIXELFMT, fmt->xm2msc_fmt);
+ xm2msc_writereg(base + XM2MSC_OUTSTRIDE, q_data->stride);
+ }
+}
+
+static void xm2msc_set_chan_com_params(struct xm2msc_chan_ctx *chan_ctx)
+{
+ void __iomem *base = chan_ctx->regs;
+ struct xm2msc_q_data *out_q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+ struct xm2msc_q_data *cap_q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+ u32 pixel_rate;
+ u32 line_rate;
+
+ xm2mvsc_initialize_coeff_banks(chan_ctx);
+
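+ /*
+ * The scale factors are programmed in Q16.16 fixed point
+ * (XM2MSC_STEP_PRECISION == 1 << 16); e.g. a 1920 -> 960
+ * downscale gives a pixel rate of 2 << 16.
+ */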
+ pixel_rate = (out_q_data->width * XM2MSC_STEP_PRECISION) /
+ cap_q_data->width;
+ line_rate = (out_q_data->height * XM2MSC_STEP_PRECISION) /
+ cap_q_data->height;
+
+ xm2msc_writereg(base + XM2MSC_PIXELRATE, pixel_rate);
+ xm2msc_writereg(base + XM2MSC_LINERATE, line_rate);
+}
+
+static void xm2msc_program_allchan(struct xm2m_msc_dev *xm2msc)
+{
+ u32 chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ xm2msc_set_chan_params(chan_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ xm2msc_set_chan_params(chan_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ xm2msc_set_chan_com_params(chan_ctx);
+ }
+}
+
+static void
+xm2msc_pr_q(struct device *dev, struct xm2msc_q_data *q, int chan,
+ int type, const char *fun_name)
+{
+ unsigned int i;
+ const struct xm2msc_fmt *fmt = q->fmt;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dev_dbg(dev, "\n\nOUTPUT Q (%d) Context from [[ %s ]]",
+ chan, fun_name);
+ else
+ dev_dbg(dev, "\n\nCAPTURE Q (%d) Context from [[ %s ]]",
+ chan, fun_name);
+
+ dev_dbg(dev, "width height stride clrspace field planes\n");
+ dev_dbg(dev, " %d %d %d %d %d %d\n",
+ q->width, q->height, q->stride,
+ q->colorspace, q->field, q->nbuffs);
+
+ for (i = 0; i < q->nbuffs; i++) {
+ dev_dbg(dev, "[plane %d ] bytesperline sizeimage\n", i);
+ dev_dbg(dev, " %d %d\n",
+ q->bytesperline[i], q->sizeimage[i]);
+ }
+
+ dev_dbg(dev, "fmt_name 4cc xlnx-fmt\n");
+ dev_dbg(dev, "%s %d %d\n",
+ fmt->name, fmt->fourcc, fmt->xm2msc_fmt);
+ dev_dbg(dev, "\n\n");
+}
+
+static void
+xm2msc_pr_status(struct xm2m_msc_dev *xm2msc,
+ const char *fun_name)
+{
+ struct device *dev = xm2msc->dev;
+
+ dev_dbg(dev, "Status in %s\n", fun_name);
+ dev_dbg(dev, "opened_chan out_streamed_chan cap_streamed_chan\n");
+ dev_dbg(dev, "0x%x 0x%x 0x%x\n",
+ xm2msc->opened_chan, xm2msc->out_streamed_chan,
+ xm2msc->cap_streamed_chan);
+ dev_dbg(dev, "\n\n");
+}
+
+static void
+xm2msc_pr_chanctx(struct xm2msc_chan_ctx *ctx, const char *fun_name)
+{
+ struct device *dev = ctx->xm2msc_dev->dev;
+
+ dev_dbg(dev, "\n\n----- [[ %s ]]: Channel %d (0x%p) context -----\n",
+ fun_name, ctx->num, ctx);
+ dev_dbg(dev, "minor = %d\n", ctx->minor);
+ dev_dbg(dev, "reg mapped at %p\n", ctx->regs);
+ dev_dbg(dev, "xm2msc \tm2m_dev \tm2m_ctx\n");
+ dev_dbg(dev, "%p \t%p \t%p\n", ctx->xm2msc_dev,
+ ctx->m2m_dev, ctx->m2m_ctx);
+
+ if (ctx->status & CHAN_OPENED)
+ dev_dbg(dev, "Opened ");
+ if (ctx->status & CHAN_ATTACHED)
+ dev_dbg(dev, "and attached");
+ dev_dbg(dev, "\n");
+ dev_dbg(dev, "-----------------------------------\n");
+ dev_dbg(dev, "\n\n");
+}
+
+static void
+xm2msc_pr_screg(struct device *dev, const void __iomem *base)
+{
+ dev_dbg(dev, "Ctr, GIE, IE, IS OUT\n");
+ dev_dbg(dev, "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ xm2msc_readreg(base + XM2MSC_AP_CTRL),
+ xm2msc_readreg(base + XM2MSC_GIE),
+ xm2msc_readreg(base + XM2MSC_IER),
+ xm2msc_readreg(base + XM2MSC_ISR),
+ xm2msc_readreg(base + XM2MSC_NUM_OUTS));
+}
+
+static void
+xm2msc_pr_chanreg(struct device *dev, struct xm2msc_chan_ctx *chan)
+{
+ const void __iomem *base = chan->regs;
+
+ dev_dbg(dev, "WIN HIN INPIXELFMT INSTRIDE SRCB0L/H SRCB1L/H\n");
+ dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
+ xm2msc_readreg(base + XM2MSC_WIDTHIN),
+ xm2msc_readreg(base + XM2MSC_HEIGHTIN),
+ xm2msc_readreg(base + XM2MSC_INPIXELFMT),
+ xm2msc_readreg(base + XM2MSC_INSTRIDE),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF0),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF0 + 4),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF1),
+ xm2msc_readreg(base + XM2MSC_SRCIMGBUF1 + 4));
+ dev_dbg(dev, "WOUT HOUT OUTPIXELFMT OUTSTRIDE DBUF0L/H DBUF1L/H\n");
+ dev_dbg(dev, "%d %d %d %d 0x%x/0x%x 0x%x/0x%x\n",
+ xm2msc_readreg(base + XM2MSC_WIDTHOUT),
+ xm2msc_readreg(base + XM2MSC_HEIGHTOUT),
+ xm2msc_readreg(base + XM2MSC_OUTPIXELFMT),
+ xm2msc_readreg(base + XM2MSC_OUTSTRIDE),
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF0),
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF0 + 4),
+ chan->num == 4 ?
+ xm2msc_readreg(base +
+ XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA) :
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF1),
+ chan->num == 4 ?
+ xm2msc_readreg(base +
+ XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA + 4) :
+ xm2msc_readreg(base + XM2MSC_DSTIMGBUF1 + 4));
+
+ dev_dbg(dev, "LINERATE PIXELRATE\n");
+ dev_dbg(dev, "0x%x 0x%x\n",
+ xm2msc_readreg(base + XM2MSC_LINERATE),
+ xm2msc_readreg(base + XM2MSC_PIXELRATE));
+}
+
+static void
+xm2msc_pr_allchanreg(struct xm2m_msc_dev *xm2msc)
+{
+ unsigned int i;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct device *dev = xm2msc->dev;
+
+ xm2msc_pr_screg(xm2msc->dev, xm2msc->regs);
+
+ for (i = 0; i < xm2msc->running_chan; i++) {
+ chan_ctx = &xm2msc->xm2msc_chan[i];
+ dev_dbg(dev, "Regs val for channel %d\n", i);
+ dev_dbg(dev, "______________________________________________\n");
+ xm2msc_pr_chanreg(dev, chan_ctx);
+ dev_dbg(dev, "processed frames = %lu\n", chan_ctx->frames);
+ dev_dbg(dev, "______________________________________________\n");
+ }
+}
+
+static inline bool xm2msc_testbit(int num, u32 *addr)
+{
+ return (*addr & BIT(num));
+}
+
+static inline void xm2msc_setbit(int num, u32 *addr)
+{
+ *addr |= BIT(num);
+}
+
+static inline void xm2msc_clrbit(int num, u32 *addr)
+{
+ *addr &= ~BIT(num);
+}
+
+static void xm2msc_stop(struct xm2m_msc_dev *xm2msc)
+{
+ void __iomem *base = xm2msc->regs;
+ u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
+
+ data &= ~XM2MSC_AP_CTRL_START;
+ xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
+}
+
+static void xm2msc_start(struct xm2m_msc_dev *xm2msc)
+{
+ void __iomem *base = xm2msc->regs;
+ u32 data = xm2msc_readreg(base + XM2MSC_AP_CTRL);
+
+ data |= XM2MSC_AP_CTRL_START;
+ xm2msc_writereg(base + XM2MSC_AP_CTRL, data);
+}
+
+static void xm2msc_set_chan(struct xm2msc_chan_ctx *ctx, bool state)
+{
+ mutex_lock(&ctx->xm2msc_dev->mutex);
+ if (state)
+ xm2msc_setbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
+ else
+ xm2msc_clrbit(ctx->num, &ctx->xm2msc_dev->opened_chan);
+ mutex_unlock(&ctx->xm2msc_dev->mutex);
+}
+
+static void
+xm2msc_set_chan_stream(struct xm2msc_chan_ctx *ctx, bool state, int type)
+{
+ u32 *ptr;
+
+ if (type == XM2MSC_CHAN_OUT)
+ ptr = &ctx->xm2msc_dev->out_streamed_chan;
+ else
+ ptr = &ctx->xm2msc_dev->cap_streamed_chan;
+
+ spin_lock(&ctx->xm2msc_dev->lock);
+ if (state)
+ xm2msc_setbit(ctx->num, ptr);
+ else
+ xm2msc_clrbit(ctx->num, ptr);
+
+ spin_unlock(&ctx->xm2msc_dev->lock);
+}
+
+static int
+xm2msc_chk_chan_stream(struct xm2msc_chan_ctx *ctx, int type)
+{
+ u32 *ptr;
+ int ret;
+
+ if (type == XM2MSC_CHAN_OUT)
+ ptr = &ctx->xm2msc_dev->out_streamed_chan;
+ else
+ ptr = &ctx->xm2msc_dev->cap_streamed_chan;
+
+ mutex_lock(&ctx->xm2msc_dev->mutex);
+ ret = xm2msc_testbit(ctx->num, ptr);
+ mutex_unlock(&ctx->xm2msc_dev->mutex);
+
+ return ret;
+}
+
+static void xm2msc_set_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
+{
+ xm2msc_setbit(index, &xm2msc->supported_fmt);
+}
+
+static int xm2msc_chk_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
+{
+ return xm2msc_testbit(index, &xm2msc->supported_fmt);
+}
+
+static void xm2msc_reset(struct xm2m_msc_dev *xm2msc)
+{
+ gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_DEASSERT);
+}
+
+/*
+ * mem2mem callbacks
+ */
+static int xm2msc_job_ready(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+
+ if ((v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) &&
+ (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0))
+ return 1;
+ return 0;
+}
+
+static bool xm2msc_alljob_ready(struct xm2m_msc_dev *xm2msc)
+{
+ struct xm2msc_chan_ctx *chan_ctx;
+ unsigned int chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ if (!xm2msc_job_ready((void *)chan_ctx)) {
+ dev_dbg(xm2msc->dev, "chan %d not ready\n",
+ chan_ctx->num);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void xm2msc_chan_abort_bufs(struct xm2msc_chan_ctx *chan_ctx)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct vb2_v4l2_buffer *dst_vb, *src_vb;
+
+ spin_lock(&xm2msc->lock);
+ dev_dbg(xm2msc->dev, "aborting all buffers\n");
+
+ while (v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) {
+ src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ while (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0) {
+ dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+ }
+
+ v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
+ spin_unlock(&xm2msc->lock);
+}
+
+static void xm2msc_job_abort(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+
+ xm2msc_chan_abort_bufs(chan_ctx);
+
+ /*
+ * Stream off the channel as job_abort may not always
+ * be called after streamoff
+ */
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
+}
+
+static int xm2msc_set_bufaddr(struct xm2m_msc_dev *xm2msc)
+{
+ unsigned int chan;
+ u32 row_align;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ void __iomem *base;
+ struct xm2msc_q_data *q_data;
+ dma_addr_t src_luma, dst_luma;
+ dma_addr_t src_chroma, dst_chroma;
+
+ if (!xm2msc_alljob_ready(xm2msc))
+ return -EINVAL;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+ base = chan_ctx->regs;
+
+ src_vb = v4l2_m2m_next_src_buf(chan_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_next_dst_buf(chan_ctx->m2m_ctx);
+
+ if (!src_vb || !dst_vb) {
+ v4l2_err(&xm2msc->v4l2_dev, "buffer not found chan = %d\n",
+ chan_ctx->num);
+ return -EINVAL;
+ }
+
+ src_luma = vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 0);
+ dst_luma = vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 0);
+
+ q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+ row_align = chan_ctx->output_height_align;
+ if (chan_ctx->q_data[XM2MSC_CHAN_OUT].nbuffs == 2)
+ /* fmts having 2 planes 2 buffers */
+ src_chroma =
+ vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf,
+ 1);
+ else if (xm2msc_is_yuv_singlebuff(q_data->fmt->fourcc))
+ /* fmts having 2 planes 1 contiguous buffer */
+ src_chroma = src_luma +
+ xm2msc_yuv_1stplane_size(q_data, row_align);
+ else /* fmts having 1 plane 1 contiguous buffer */
+ src_chroma = 0;
+
+ q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+ row_align = chan_ctx->capture_height_align;
+ if (chan_ctx->q_data[XM2MSC_CHAN_CAP].nbuffs == 2)
+ dst_chroma =
+ vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf,
+ 1);
+ else if (xm2msc_is_yuv_singlebuff(q_data->fmt->fourcc))
+ dst_chroma = dst_luma +
+ xm2msc_yuv_1stplane_size(q_data, row_align);
+ else
+ dst_chroma = 0;
+
+ if (xm2msc->dma_addr_size == 64 &&
+ sizeof(dma_addr_t) == sizeof(u64)) {
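+ /*
+ * IP built with 64-bit addressing: each buffer address
+ * is programmed as two 32-bit writes, low word first.
+ */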
+ xm2msc_write64reg(base + XM2MSC_SRCIMGBUF0, src_luma);
+ xm2msc_write64reg(base + XM2MSC_SRCIMGBUF1, src_chroma);
+ xm2msc_write64reg(base + XM2MSC_DSTIMGBUF0, dst_luma);
+ if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
+ xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1 +
+ XM2MSC_RESERVED_AREA,
+ dst_chroma);
+ else
+ xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1,
+ dst_chroma);
+ } else {
+ xm2msc_writereg(base + XM2MSC_SRCIMGBUF0, src_luma);
+ xm2msc_writereg(base + XM2MSC_SRCIMGBUF1, src_chroma);
+ xm2msc_writereg(base + XM2MSC_DSTIMGBUF0, dst_luma);
+ if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
+ xm2msc_writereg(base + XM2MSC_DSTIMGBUF1 +
+ XM2MSC_RESERVED_AREA,
+ dst_chroma);
+ else
+ xm2msc_writereg(base + XM2MSC_DSTIMGBUF1,
+ dst_chroma);
+ }
+ }
+ return 0;
+}
+
+static void xm2msc_job_finish(struct xm2m_msc_dev *xm2msc)
+{
+ unsigned int chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+ v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
+ }
+}
+
+static void xm2msc_job_done(struct xm2m_msc_dev *xm2msc)
+{
+ u32 chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ spin_lock_irqsave(&xm2msc->lock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+ }
+ chan_ctx->frames++;
+ }
+}
+
+static void xm2msc_device_run(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ void __iomem *base = xm2msc->regs;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xm2msc->lock, flags);
+ if (xm2msc->device_busy) {
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+ return;
+ }
+ xm2msc->device_busy = true;
+
+ if (xm2msc->running_chan != NUM_STREAM(xm2msc)) {
+ dev_dbg(xm2msc->dev, "Running chan was %d\n",
+ xm2msc->running_chan);
+ xm2msc->running_chan = NUM_STREAM(xm2msc);
+
+ /* IP needs a reset for XM2MSC_NUM_OUTS to take effect */
+ xm2msc_reset(xm2msc);
+ xm2msc_writereg(base + XM2MSC_NUM_OUTS, xm2msc->running_chan);
+ xm2msc_program_allchan(xm2msc);
+ }
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+
+ dev_dbg(xm2msc->dev, "Running chan = %d\n", xm2msc->running_chan);
+ if (!xm2msc->running_chan) {
+ xm2msc->device_busy = false;
+ return;
+ }
+
+ ret = xm2msc_set_bufaddr(xm2msc);
+ if (ret) {
+ /*
+ * Not all channels have buffers queued. Removal of an
+ * intermediate channel while streaming is in progress is
+ * currently not handled.
+ */
+ if (xm2msc->out_streamed_chan || xm2msc->cap_streamed_chan)
+ dev_err(xm2msc->dev,
+ "Buffer not available, streaming chan 0x%x\n",
+ xm2msc->cap_streamed_chan);
+
+ xm2msc->device_busy = false;
+ return;
+ }
+
+ xm2msc_writereg(base + XM2MSC_GIE, XM2MSC_GIE_EN);
+ xm2msc_writereg(base + XM2MSC_IER, XM2MSC_ISR_DONE);
+
+ xm2msc_pr_status(xm2msc, __func__);
+ xm2msc_pr_screg(xm2msc->dev, base);
+ xm2msc_pr_allchanreg(xm2msc);
+
+ xm2msc_start(xm2msc);
+
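+ /*
+ * Processing is synchronous here: block until the ISR clears
+ * isr_wait and wakes us, then mark the jobs done.
+ */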
+ xm2msc->isr_wait = true;
+ wait_event(xm2msc->isr_finished, !xm2msc->isr_wait);
+
+ xm2msc_job_done(xm2msc);
+
+ xm2msc->device_busy = false;
+
+ if (xm2msc_alljob_ready(xm2msc))
+ xm2msc_device_run(xm2msc->xm2msc_chan);
+
+ xm2msc_job_finish(xm2msc);
+}
+
+static irqreturn_t xm2msc_isr(int irq, void *data)
+{
+ struct xm2m_msc_dev *xm2msc = (struct xm2m_msc_dev *)data;
+ void __iomem *base = xm2msc->regs;
+ u32 status;
+
+ status = xm2msc_readreg(base + XM2MSC_ISR);
+ if (!(status & XM2MSC_ISR_DONE))
+ return IRQ_NONE;
+
+ xm2msc_writereg(base + XM2MSC_ISR, status & XM2MSC_ISR_DONE);
+
+ xm2msc_stop(xm2msc);
+
+ xm2msc->isr_wait = false;
+ wake_up(&xm2msc->isr_finished);
+
+ return IRQ_HANDLED;
+}
+
+static int xm2msc_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_streamon(file, chan_ctx->m2m_ctx, type);
+}
+
+static int xm2msc_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+ int ret;
+
+ ret = v4l2_m2m_streamoff(file, chan_ctx->m2m_ctx, type);
+
+ /* Check if any channel is still running */
+ xm2msc_device_run(chan_ctx);
+ return ret;
+}
+
+static int xm2msc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_qbuf(file, chan_ctx->m2m_ctx, buf);
+}
+
+static int xm2msc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_dqbuf(file, chan_ctx->m2m_ctx, buf);
+}
+
+static int xm2msc_expbuf(struct file *file, void *fh,
+ struct v4l2_exportbuffer *eb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_expbuf(file, chan_ctx->m2m_ctx, eb);
+}
+
+static int xm2msc_createbufs(struct file *file, void *fh,
+ struct v4l2_create_buffers *cb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_create_bufs(file, chan_ctx->m2m_ctx, cb);
+}
+
+static int xm2msc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_reqbufs(file, chan_ctx->m2m_ctx, reqbufs);
+}
+
+static int xm2msc_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return v4l2_m2m_querybuf(file, chan_ctx->m2m_ctx, buf);
+}
+
+static void
+xm2msc_cal_imagesize(struct xm2msc_chan_ctx *chan_ctx,
+ struct xm2msc_q_data *q_data, u32 type)
+{
+ unsigned int i;
+ u32 fourcc = q_data->fmt->fourcc;
+ u32 height = q_data->height;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ height = ALIGN(height, chan_ctx->output_height_align);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ height = ALIGN(height, chan_ctx->capture_height_align);
+
+ for (i = 0; i < q_data->nbuffs; i++) {
+ q_data->bytesperline[i] = q_data->stride;
+ q_data->sizeimage[i] = q_data->stride * height;
+ }
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_XV15:
+ /*
+ * Adding chroma plane size as NV12/XV15
+ * have a contiguous buffer for luma and chroma
+ */
+ q_data->sizeimage[0] +=
+ q_data->stride * (height / 2);
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_XV15M:
+ q_data->sizeimage[1] =
+ q_data->stride * (height / 2);
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned int
+xm2msc_cal_stride(unsigned int width, enum xm2msc_pix_fmt xfmt, u8 ppc)
+{
+ unsigned int stride;
+ u32 align;
+
+ /* Stride in bytes = width * bytes per pixel */
+ switch (xfmt) {
+ case XILINX_M2MSC_FMT_RGBX8:
+ case XILINX_M2MSC_FMT_YUVX8:
+ case XILINX_M2MSC_FMT_RGBX10:
+ case XILINX_M2MSC_FMT_YUVX10:
+ case XILINX_M2MSC_FMT_BGRX8:
+ stride = width * 4;
+ break;
+ case XILINX_M2MSC_FMT_YUYV8:
+ case XILINX_M2MSC_FMT_UYVY8:
+ stride = width * 2;
+ break;
+ case XILINX_M2MSC_FMT_Y_UV8:
+ case XILINX_M2MSC_FMT_Y_UV8_420:
+ case XILINX_M2MSC_FMT_Y8:
+ stride = width * 1;
+ break;
+ case XILINX_M2MSC_FMT_RGB8:
+ case XILINX_M2MSC_FMT_YUV8:
+ case XILINX_M2MSC_FMT_BGR8:
+ stride = width * 3;
+ break;
+ case XILINX_M2MSC_FMT_Y_UV10:
+ case XILINX_M2MSC_FMT_Y_UV10_420:
+ case XILINX_M2MSC_FMT_Y10:
+ /* 4 bytes per 3 pixels */
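+ /* e.g. width 1920: DIV_ROUND_UP(1920 * 4, 3) = 2560 bytes */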
+ stride = DIV_ROUND_UP(width * 4, 3);
+ break;
+ default:
+ stride = 0;
+ }
+
+ /* Align the stride to the IP data width: 64 bits * pixels per clock */
+ align = ppc * XM2MSC_ALIGN_MUL;
+ stride = ALIGN(stride, align);
+
+ return stride;
+}
+
+static int
+vidioc_try_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct xm2msc_q_data *q_data;
+ struct vb2_queue *vq;
+ int index;
+
+ if (pix->width < XM2MSC_MIN_WIDTH || pix->width > xm2msc->max_wd ||
+ pix->height < XM2MSC_MIN_HEIGHT || pix->height > xm2msc->max_ht)
+ dev_dbg(xm2msc->dev,
+ "Wrong input parameters %d, wxh: %dx%d.\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+
+ /* The width value must be a multiple of pixels per clock */
+ if (pix->width % chan_ctx->xm2msc_dev->ppc) {
+ dev_info(xm2msc->dev,
+ "Wrong align parameters %d, wxh: %dx%d.\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+ pix->width = ALIGN(pix->width, chan_ctx->xm2msc_dev->ppc);
+ }
+
+ /*
+ * V4L2 specification suggests the driver corrects the
+ * format struct if any of the dimensions is unsupported
+ */
+ if (pix->height < XM2MSC_MIN_HEIGHT)
+ pix->height = XM2MSC_MIN_HEIGHT;
+ else if (pix->height > xm2msc->max_ht)
+ pix->height = xm2msc->max_ht;
+
+ if (pix->width < XM2MSC_MIN_WIDTH)
+ pix->width = XM2MSC_MIN_WIDTH;
+ else if (pix->width > xm2msc->max_wd)
+ pix->width = xm2msc->max_wd;
+
+ vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(chan_ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ index = find_format_index(f);
+ if (!q_data->fmt || index == ARRAY_SIZE(formats) ||
+ !xm2msc_chk_fmt(xm2msc, index)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Couldn't set format type %d, wxh: %dx%d. ",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+ v4l2_err(&xm2msc->v4l2_dev,
+ "fmt: %d, field: %d\n",
+ f->fmt.pix.pixelformat, f->fmt.pix.field);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void xm2msc_get_align(struct xm2msc_chan_ctx *chan_ctx)
+{
+ /*
+ * TODO: This is a temporary solution, will be reverted once stride and
+ * height align value come from application.
+ */
+ chan_ctx->output_stride_align = output_stride_align[chan_ctx->num];
+ chan_ctx->capture_stride_align = capture_stride_align[chan_ctx->num];
+ chan_ctx->output_height_align = output_height_align[chan_ctx->num];
+ chan_ctx->capture_height_align = capture_height_align[chan_ctx->num];
+ if (output_stride_align[chan_ctx->num] != 1 ||
+ capture_stride_align[chan_ctx->num] != 1 ||
+ output_height_align[chan_ctx->num] != 1 ||
+ capture_height_align[chan_ctx->num] != 1) {
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "Non-default alignment values entered.\n");
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "Note that this interface is temporary and will be");
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "removed once align values come from the application\n");
+ dev_info(chan_ctx->xm2msc_dev->dev,
+ "values entered are -\n"
+ "output_stride_align = %d\n"
+ "output_height_align = %d\n"
+ "capture_stride_align = %d\n"
+ "capture_height_align = %d\n",
+ chan_ctx->output_stride_align,
+ chan_ctx->output_height_align,
+ chan_ctx->capture_stride_align,
+ chan_ctx->capture_height_align);
+ }
+}
+
+static int
+vidioc_s_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct xm2msc_q_data *q_data = get_q_data(chan_ctx, f->type);
+ unsigned int i;
+ unsigned int align = 1;
+
+ q_data->width = pix->width;
+ q_data->height = pix->height;
+ q_data->stride = xm2msc_cal_stride(pix->width,
+ q_data->fmt->xm2msc_fmt,
+ chan_ctx->xm2msc_dev->ppc);
+
+ xm2msc_get_align(chan_ctx);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ align = chan_ctx->output_stride_align;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ align = chan_ctx->capture_stride_align;
+
+ q_data->stride = ALIGN(q_data->stride, align);
+
+ q_data->colorspace = pix->colorspace;
+ q_data->field = pix->field;
+ q_data->nbuffs = q_data->fmt->num_buffs;
+
+ xm2msc_cal_imagesize(chan_ctx, q_data, f->type);
+
+ for (i = 0; i < q_data->nbuffs; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data,
+ chan_ctx->num, f->type, __func__);
+
+ return 0;
+}
+
+static int xm2msc_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_try_fmt(chan_ctx, f);
+}
+
+static int xm2msc_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_try_fmt(chan_ctx, f);
+}
+
+static int xm2msc_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ ret = xm2msc_try_fmt_vid_cap(file, fh, f);
+ if (ret)
+ return ret;
+ return vidioc_s_fmt(chan_ctx, f);
+}
+
+static int xm2msc_s_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ ret = xm2msc_try_fmt_vid_out(file, fh, f);
+ if (ret)
+ return ret;
+
+ return vidioc_s_fmt(chan_ctx, f);
+}
+
+static int vidioc_g_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct xm2msc_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ unsigned int i;
+
+ vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(chan_ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ pix->width = q_data->width;
+ pix->height = q_data->height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->colorspace = q_data->colorspace;
+ pix->num_planes = q_data->nbuffs;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int xm2msc_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_g_fmt(chan_ctx, f);
+}
+
+static int xm2msc_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ return vidioc_g_fmt(chan_ctx, f);
+}
+
+static int enum_fmt(struct xm2m_msc_dev *xm2msc, struct v4l2_fmtdesc *f)
+{
+ const struct xm2msc_fmt *fmt;
+ unsigned int i, enabled = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (xm2msc_chk_fmt(xm2msc, i) && enabled++ == f->index)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ /* Format not found */
+ return -EINVAL;
+
+ /* Format found */
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name,
+ sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int xm2msc_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ return enum_fmt(chan_ctx->xm2msc_dev, f);
+}
+
+static int xm2msc_enum_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ return enum_fmt(chan_ctx->xm2msc_dev, f);
+}
+
+static int xm2msc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, XM2MSC_DRIVER_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, XM2MSC_DRIVER_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", XM2MSC_DRIVER_NAME);
+ /*
+ * This is a mem-to-mem only video device; only the
+ * V4L2_CAP_VIDEO_M2M_MPLANE capability is advertised.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int xm2msc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ unsigned int i;
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vq);
+ struct xm2msc_q_data *q_data;
+
+ q_data = get_q_data(chan_ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
+
+ *nplanes = q_data->nbuffs;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+
+ dev_dbg(chan_ctx->xm2msc_dev->dev, "get %d buffer(s) of size %d",
+ *nbuffers, sizes[0]);
+ if (q_data->nbuffs == 2)
+ dev_dbg(chan_ctx->xm2msc_dev->dev, " and %d\n", sizes[1]);
+
+ return 0;
+}
+
+static int xm2msc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct xm2msc_q_data *q_data;
+ unsigned int i, num_buffs;
+
+ q_data = get_q_data(chan_ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+ num_buffs = q_data->nbuffs;
+
+ for (i = 0; i < num_buffs; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ v4l2_err(&xm2msc->v4l2_dev, "data will not fit into plane ");
+ v4l2_err(&xm2msc->v4l2_dev, "(%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (unsigned long)q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_buffs; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static void xm2msc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(chan_ctx->m2m_ctx, vbuf);
+}
+
+static void xm2msc_return_all_buffers(struct xm2msc_chan_ctx *chan_ctx,
+ struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+ if (!vb)
+ break;
+ spin_lock_irqsave(&chan_ctx->xm2msc_dev->lock, flags);
+ v4l2_m2m_buf_done(vb, state);
+ spin_unlock_irqrestore(&chan_ctx->xm2msc_dev->lock, flags);
+ }
+}
+
+static int xm2msc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
+ struct xm2msc_q_data *q_data;
+ int type;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_OUT);
+ else
+ xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_CAP);
+
+ xm2msc_set_chan_params(chan_ctx, q->type);
+
+ if (xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_CAP) &&
+ xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_OUT))
+ xm2msc_set_chan_com_params(chan_ctx);
+
+ type = V4L2_TYPE_IS_OUTPUT(q->type) ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q_data = get_q_data(chan_ctx, type);
+ xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data, chan_ctx->num,
+ type, __func__);
+ xm2msc_pr_status(chan_ctx->xm2msc_dev, __func__);
+
+ return 0;
+}
+
+static void xm2msc_stop_streaming(struct vb2_queue *q)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
+
+ xm2msc_return_all_buffers(chan_ctx, q, VB2_BUF_STATE_ERROR);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
+ else
+ xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
+}
+
+static const struct vb2_ops xm2msc_qops = {
+ .queue_setup = xm2msc_queue_setup,
+ .buf_prepare = xm2msc_buf_prepare,
+ .buf_queue = xm2msc_buf_queue,
+ .start_streaming = xm2msc_start_streaming,
+ .stop_streaming = xm2msc_stop_streaming,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = chan_ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &xm2msc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &xm2msc->dev_mutex;
+ src_vq->dev = xm2msc->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+ dst_vq->drv_priv = chan_ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &xm2msc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &xm2msc->dev_mutex;
+ dst_vq->dev = xm2msc->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static const struct v4l2_ioctl_ops xm2msc_ioctl_ops = {
+ .vidioc_querycap = xm2msc_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = xm2msc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = xm2msc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = xm2msc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = xm2msc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out_mplane = xm2msc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = xm2msc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out_mplane = xm2msc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = xm2msc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = xm2msc_reqbufs,
+ .vidioc_querybuf = xm2msc_querybuf,
+ .vidioc_expbuf = xm2msc_expbuf,
+ .vidioc_create_bufs = xm2msc_createbufs,
+
+ .vidioc_qbuf = xm2msc_qbuf,
+ .vidioc_dqbuf = xm2msc_dqbuf,
+
+ .vidioc_streamon = xm2msc_streamon,
+ .vidioc_streamoff = xm2msc_streamoff,
+};
+
+static void xm2msc_set_q_data(struct xm2msc_chan_ctx *chan_ctx,
+ const struct xm2msc_fmt *fmt,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_q_data *q_data;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ q_data = get_q_data(chan_ctx, type);
+
+ q_data->fmt = fmt;
+ q_data->width = xm2msc->max_wd;
+ q_data->height = xm2msc->max_ht;
+ q_data->field = V4L2_FIELD_NONE;
+ q_data->nbuffs = q_data->fmt->num_buffs;
+
+ q_data->stride = xm2msc_cal_stride(q_data->width,
+ q_data->fmt->xm2msc_fmt,
+ xm2msc->ppc);
+
+ xm2msc_cal_imagesize(chan_ctx, q_data, type);
+}
+
+static int xm2msc_set_chan_parm(struct xm2msc_chan_ctx *chan_ctx)
+{
+ int ret = 0;
+ unsigned int i;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ chan_ctx->output_stride_align = 1;
+ chan_ctx->output_height_align = 1;
+ chan_ctx->capture_stride_align = 1;
+ chan_ctx->capture_height_align = 1;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (xm2msc_chk_fmt(xm2msc, i))
+ break;
+ }
+
+ /* No supported format */
+ if (i == ARRAY_SIZE(formats)) {
+ dev_err(xm2msc->dev, "no supported format found\n");
+ return -EINVAL;
+ }
+
+ xm2msc_set_q_data(chan_ctx, &formats[i],
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ xm2msc_set_q_data(chan_ctx, &formats[i],
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ return ret;
+}
+
+static int xm2msc_open(struct file *file)
+{
+ struct xm2m_msc_dev *xm2msc = video_drvdata(file);
+ struct xm2msc_chan_ctx *chan_ctx = NULL;
+ u32 minor, chan;
+ int ret;
+
+ if (mutex_lock_interruptible(&xm2msc->dev_mutex))
+ return -ERESTARTSYS;
+
+ minor = iminor(file_inode(file));
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ if ((chan_ctx->status & CHAN_ATTACHED) &&
+ chan_ctx->minor == minor)
+ break;
+ }
+
+ if (chan == xm2msc->max_chan) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s Chan not found with minor = %d\n",
+ __func__, minor);
+ ret = -EBADF;
+ goto unlock;
+ }
+
+ /* Already opened, do not allow the same channel
+ * to be opened more than once
+ */
+ if (chan_ctx->status & CHAN_OPENED) {
+ v4l2_warn(&xm2msc->v4l2_dev,
+ "%s Chan already opened for minor = %d\n",
+ __func__, minor);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ v4l2_fh_init(&chan_ctx->fh, &chan_ctx->vfd);
+ file->private_data = &chan_ctx->fh;
+ v4l2_fh_add(&chan_ctx->fh);
+
+ chan_ctx->m2m_ctx = v4l2_m2m_ctx_init(chan_ctx->m2m_dev,
+ chan_ctx, &queue_init);
+ if (IS_ERR(chan_ctx->m2m_ctx)) {
+ ret = PTR_ERR(chan_ctx->m2m_ctx);
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s Chan M2M CTX not creted for minor %d\n",
+ __func__, minor);
+ goto error_m2m;
+ }
+
+ chan_ctx->fh.m2m_ctx = chan_ctx->m2m_ctx;
+ chan_ctx->status |= CHAN_OPENED;
+ chan_ctx->xm2msc_dev = xm2msc;
+ chan_ctx->frames = 0;
+
+ xm2msc_set_chan(chan_ctx, true);
+
+ v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance created\n", chan);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ xm2msc_pr_status(xm2msc, __func__);
+ return 0;
+
+error_m2m:
+ v4l2_fh_del(&chan_ctx->fh);
+ v4l2_fh_exit(&chan_ctx->fh);
+unlock:
+ mutex_unlock(&xm2msc->dev_mutex);
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ xm2msc_pr_status(xm2msc, __func__);
+ return ret;
+}
+
+static int xm2msc_release(struct file *file)
+{
+ struct xm2m_msc_dev *xm2msc = video_drvdata(file);
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
+
+ if (mutex_lock_interruptible(&xm2msc->dev_mutex))
+ return -ERESTARTSYS;
+
+ v4l2_m2m_ctx_release(chan_ctx->m2m_ctx);
+ v4l2_fh_del(&chan_ctx->fh);
+ v4l2_fh_exit(&chan_ctx->fh);
+ chan_ctx->status &= ~CHAN_OPENED;
+ xm2msc_set_chan(chan_ctx, false);
+
+ v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance released\n",
+ chan_ctx->num);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ return 0;
+}
+
+static unsigned int xm2msc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ mutex_lock(&xm2msc->dev_mutex);
+ ret = v4l2_m2m_poll(file, chan_ctx->m2m_ctx, wait);
+ mutex_unlock(&xm2msc->dev_mutex);
+
+ return ret;
+}
+
+static int xm2msc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct xm2msc_chan_ctx *chan_ctx = file->private_data;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ mutex_lock(&xm2msc->dev_mutex);
+ ret = v4l2_m2m_mmap(file, chan_ctx->m2m_ctx, vma);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations xm2msc_fops = {
+ .owner = THIS_MODULE,
+ .open = xm2msc_open,
+ .release = xm2msc_release,
+ .poll = xm2msc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = xm2msc_mmap,
+};
+
+static const struct video_device xm2msc_videodev = {
+ .name = XM2MSC_DRIVER_NAME,
+ .fops = &xm2msc_fops,
+ .ioctl_ops = &xm2msc_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops xm2msc_m2m_ops = {
+ .device_run = xm2msc_device_run,
+ .job_ready = xm2msc_job_ready,
+ .job_abort = xm2msc_job_abort,
+};
+
+static int xm2msc_parse_of(struct platform_device *pdev,
+ struct xm2m_msc_dev *xm2msc)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ int hw_vid_fmt_cnt;
+ const char *vid_fmts[ARRAY_SIZE(formats)];
+ int ret;
+ u32 i, j;
+
+ xm2msc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(xm2msc->clk)) {
+ ret = PTR_ERR(xm2msc->clk);
+ dev_err(dev, "failed to get clk (%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xm2msc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)xm2msc->regs))
+ return PTR_ERR((__force const void *)xm2msc->regs);
+
+ dev_dbg(dev, "IO Mem 0x%llx mapped at %p\n", res->start, xm2msc->regs);
+
+ ret = of_property_read_u32(node, "xlnx,max-chan",
+ &xm2msc->max_chan);
+ if (ret < 0)
+ return ret;
+
+ if (xm2msc->max_chan < XM2MSC_MIN_CHAN ||
+ xm2msc->max_chan > XM2MSC_MAX_CHAN) {
+ dev_err(dev,
+ "Invalid maximum scaler channels : %d",
+ xm2msc->max_chan);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xm2msc->max_wd);
+ if (ret < 0) {
+ dev_err(dev,
+ "missing xlnx,max-width prop\n");
+ return ret;
+ }
+
+ if (xm2msc->max_wd < XM2MSC_MIN_WIDTH ||
+ xm2msc->max_wd > XM2MSC_MAX_WIDTH) {
+ dev_err(dev, "Invalid width : %d",
+ xm2msc->max_wd);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xm2msc->max_ht);
+ if (ret < 0) {
+ dev_err(dev, "missing xlnx,max-height prop\n");
+ return ret;
+ }
+
+ if (xm2msc->max_ht < XM2MSC_MIN_HEIGHT ||
+ xm2msc->max_ht > XM2MSC_MAX_HEIGHT) {
+ dev_err(dev, "Invalid height : %d",
+ xm2msc->max_ht);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &xm2msc->dma_addr_size);
+ if (ret || (xm2msc->dma_addr_size != 32 &&
+ xm2msc->dma_addr_size != 64)) {
+ dev_err(dev, "missing/invalid addr width dts prop\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u8(node, "xlnx,pixels-per-clock",
+ &xm2msc->ppc);
+ if (ret || (xm2msc->ppc != 1 && xm2msc->ppc != 2 && xm2msc->ppc != 4)) {
+ dev_err(dev, "missing or invalid pixels per clock dts prop\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-taps",
+ &xm2msc->taps);
+ if (ret || (xm2msc->taps != XSCALER_TAPS_6 &&
+ xm2msc->taps != XSCALER_TAPS_8 &&
+ xm2msc->taps != XSCALER_TAPS_10 &&
+ xm2msc->taps != XSCALER_TAPS_12)) {
+ dev_err(dev, "missing/invalid taps in dts prop\n");
+ return -EINVAL;
+ }
+
+ /* irq_of_parse_and_map() returns 0 on failure, not a negative code */
+ xm2msc->irq = irq_of_parse_and_map(node, 0);
+ if (!xm2msc->irq) {
+ dev_err(dev, "Unable to get IRQ\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Max Channel Supported = %d\n", xm2msc->max_chan);
+ dev_dbg(dev, "DMA Addr width Supported = %d\n", xm2msc->dma_addr_size);
+ dev_dbg(dev, "Max col/row Supported = (%d) / (%d)\n",
+ xm2msc->max_wd, xm2msc->max_ht);
+ dev_dbg(dev, "taps Supported = %d\n", xm2msc->taps);
+ /* read supported video formats and update internal table */
+ hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
+
+ ret = of_property_read_string_array(node, "xlnx,vid-formats",
+ vid_fmts, hw_vid_fmt_cnt);
+ if (ret < 0) {
+ dev_err(dev,
+ "Missing or invalid xlnx,vid-formats dts prop\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Supported format = ");
+ for (i = 0; i < hw_vid_fmt_cnt; i++) {
+ const char *vid_fmt_name = vid_fmts[i];
+
+ for (j = 0; j < ARRAY_SIZE(formats); j++) {
+ const char *dts_name = formats[j].name;
+
+ if (strcmp(vid_fmt_name, dts_name))
+ continue;
+ dev_dbg(dev, "%s ", dts_name);
+
+ xm2msc_set_fmt(xm2msc, j);
+ }
+ }
+ dev_dbg(dev, "\n");
+ xm2msc->rst_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xm2msc->rst_gpio)) {
+ ret = PTR_ERR(xm2msc->rst_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev,
+ "Probe deferred due to GPIO reset defer\n");
+ else
+ dev_err(dev,
+ "Unable to locate reset property in dt\n");
+ return ret;
+ }
+
+ return 0;
+}
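+/*
+ * Illustrative device tree fragment for the properties parsed above. This
+ * is a sketch only: the node name, unit address and all values are
+ * assumptions, not taken from the bindings document.
+ *
+ *	scaler@a0000000 {
+ *		compatible = "xlnx,v-multi-scaler-v1.0";
+ *		reg = <0xa0000000 0x10000>;
+ *		interrupts = <0 90 4>;
+ *		clocks = <&clkc 72>;
+ *		xlnx,max-chan = <4>;
+ *		xlnx,max-width = <3840>;
+ *		xlnx,max-height = <2160>;
+ *		xlnx,dma-addr-width = <32>;
+ *		xlnx,pixels-per-clock = /bits/ 8 <2>;
+ *		xlnx,num-taps = <6>;
+ *		xlnx,vid-formats = "yuyv", "nv12";
+ *		reset-gpios = <&gpio 78 1>;
+ *	};
+ */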
+
+static void xm2msc_unreg_video_n_m2m(struct xm2m_msc_dev *xm2msc)
+{
+ struct xm2msc_chan_ctx *chan_ctx;
+ unsigned int chan;
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+ if (!(chan_ctx->status & CHAN_ATTACHED))
+ break; /* We register video devices sequentially */
+ video_unregister_device(&chan_ctx->vfd);
+ chan_ctx->status &= ~CHAN_ATTACHED;
+
+ if (!IS_ERR(chan_ctx->m2m_dev))
+ v4l2_m2m_release(chan_ctx->m2m_dev);
+ }
+}
+
+static int xm2m_msc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct xm2m_msc_dev *xm2msc;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct video_device *vfd;
+ unsigned int chan;
+
+ xm2msc = devm_kzalloc(&pdev->dev, sizeof(*xm2msc), GFP_KERNEL);
+ if (!xm2msc)
+ return -ENOMEM;
+
+ ret = xm2msc_parse_of(pdev, xm2msc);
+ if (ret < 0)
+ return ret;
+
+ xm2msc->dev = &pdev->dev;
+
+ ret = clk_prepare_enable(xm2msc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clk (%d)\n", ret);
+ return ret;
+ }
+
+ xm2msc_reset(xm2msc);
+
+ spin_lock_init(&xm2msc->lock);
+
+ ret = v4l2_device_register(&pdev->dev, &xm2msc->v4l2_dev);
+ if (ret)
+ goto reg_dev_err;
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ vfd = &chan_ctx->vfd;
+ *vfd = xm2msc_videodev;
+ vfd->lock = &xm2msc->dev_mutex;
+ vfd->v4l2_dev = &xm2msc->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, chan);
+ if (ret) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Failed to register video dev for chan %d\n",
+ chan);
+ goto unreg_dev;
+ }
+
+ chan_ctx->status = CHAN_ATTACHED;
+
+ video_set_drvdata(vfd, xm2msc);
+ snprintf(vfd->name, sizeof(vfd->name),
+ "%s", xm2msc_videodev.name);
+ v4l2_info(&xm2msc->v4l2_dev,
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ dev_dbg(xm2msc->dev, "%s Device registered as /dev/video%d\n",
+ __func__, vfd->num);
+
+ chan_ctx->m2m_dev = v4l2_m2m_init(&xm2msc_m2m_ops);
+ if (IS_ERR(chan_ctx->m2m_dev)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Failed to init mem2mem device for chan %d\n",
+ chan);
+ ret = PTR_ERR(chan_ctx->m2m_dev);
+ goto unreg_dev;
+ }
+ chan_ctx->xm2msc_dev = xm2msc;
+ chan_ctx->regs = xm2msc->regs + XM2MSC_CHAN_REGS_START(chan);
+ if (chan > 4) /* TODO: To be fixed in HW */
+ chan_ctx->regs += XM2MSC_RESERVED_AREA;
+ chan_ctx->num = chan;
+ chan_ctx->minor = vfd->minor;
+
+ /* Set channel parameters to default values */
+ ret = xm2msc_set_chan_parm(chan_ctx);
+ if (ret)
+ goto unreg_dev;
+
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ }
+
+ mutex_init(&xm2msc->dev_mutex);
+ mutex_init(&xm2msc->mutex);
+ init_waitqueue_head(&xm2msc->isr_finished);
+
+ ret = devm_request_irq(&pdev->dev, xm2msc->irq,
+ xm2msc_isr, IRQF_SHARED,
+ XM2MSC_DRIVER_NAME, xm2msc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to register IRQ\n");
+ goto unreg_dev;
+ }
+
+ platform_set_drvdata(pdev, xm2msc);
+
+ return 0;
+
+unreg_dev:
+ xm2msc_unreg_video_n_m2m(xm2msc);
+ v4l2_device_unregister(&xm2msc->v4l2_dev);
+reg_dev_err:
+ clk_disable_unprepare(xm2msc->clk);
+ return ret;
+}
+
+static int xm2m_msc_remove(struct platform_device *pdev)
+{
+ struct xm2m_msc_dev *xm2msc = platform_get_drvdata(pdev);
+
+ xm2msc_unreg_video_n_m2m(xm2msc);
+ v4l2_device_unregister(&xm2msc->v4l2_dev);
+ clk_disable_unprepare(xm2msc->clk);
+ return 0;
+}
+
+static const struct of_device_id xm2m_msc_of_id_table[] = {
+ {.compatible = "xlnx,v-multi-scaler-v1.0"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xm2m_msc_of_id_table);
+
+static struct platform_driver xm2m_msc_driver = {
+ .driver = {
+ .name = "xilinx-multiscaler",
+ .of_match_table = xm2m_msc_of_id_table,
+ },
+ .probe = xm2m_msc_probe,
+ .remove = xm2m_msc_remove,
+};
+
+module_platform_driver(xm2m_msc_driver);
+
+MODULE_DESCRIPTION("Xilinx M2M Multi-Scaler Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("xlnx_m2m_multiscaler_dev");
diff --git a/drivers/media/platform/xilinx/xilinx-remapper.c b/drivers/media/platform/xilinx/xilinx-remapper.c
new file mode 100644
index 000000000000..d2e84ec1f2d6
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-remapper.c
@@ -0,0 +1,546 @@
+/*
+ * Xilinx Video Remapper
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XREMAP_MIN_WIDTH 1
+#define XREMAP_DEF_WIDTH 1920
+#define XREMAP_MAX_WIDTH 65535
+#define XREMAP_MIN_HEIGHT 1
+#define XREMAP_DEF_HEIGHT 1080
+#define XREMAP_MAX_HEIGHT 65535
+
+#define XREMAP_PAD_SINK 0
+#define XREMAP_PAD_SOURCE 1
+
+/**
+ * struct xremap_mapping_output - Output format description
+ * @code: media bus pixel code after remapping
+ * @num_components: number of pixel components after remapping
+ * @component_maps: configuration array corresponding to this output
+ */
+struct xremap_mapping_output {
+ u32 code;
+ unsigned int num_components;
+ unsigned int component_maps[4];
+};
+
+/**
+ * struct xremap_mapping - Input-output remapping description
+ * @code: media bus pixel code before remapping
+ * @width: video bus width in bits
+ * @num_components: number of pixel components before remapping
+ * @outputs: array of possible output formats
+ */
+struct xremap_mapping {
+ u32 code;
+ unsigned int width;
+ unsigned int num_components;
+ const struct xremap_mapping_output *outputs;
+};
+
+/**
+ * struct xremap_device - Xilinx Video Remapper device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @config: device configuration parsed from its DT node
+ * @config.width: video bus width in bits
+ * @config.num_s_components: number of pixel components at the input
+ * @config.num_m_components: number of pixel components at the output
+ * @config.component_maps: component remapping configuration
+ * @default_mapping: Default mapping compatible with the configuration
+ * @default_output: Default output format for the default mapping
+ */
+struct xremap_device {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+
+ struct {
+ unsigned int width;
+ unsigned int num_s_components;
+ unsigned int num_m_components;
+ unsigned int component_maps[4];
+ } config;
+
+ const struct xremap_mapping *default_mapping;
+ const struct xremap_mapping_output *default_output;
+};
+
+static inline struct xremap_device *to_remap(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xremap_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Mappings
+ */
+
+static const struct xremap_mapping xremap_mappings[] = {
+ {
+ .code = MEDIA_BUS_FMT_RBG888_1X24,
+ .width = 8,
+ .num_components = 3,
+ .outputs = (const struct xremap_mapping_output[]) {
+ { MEDIA_BUS_FMT_RGB888_1X32_PADHI, 4, { 1, 0, 2, 4 } },
+ { },
+ },
+ },
+};
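+/*
+ * Reading of the single mapping above: a 24-bit RBG888 input with three
+ * 8-bit components may be remapped to 32-bit padded RGB by emitting
+ * components in the order { 1, 0, 2, 4 }. Indices below the source
+ * component count select an input component; the interpretation of the
+ * out-of-range index 4 as the padding byte is an assumption, inferred from
+ * the "xlnx,component-maps" validation in xremap_parse_of(), which accepts
+ * map values up to 4.
+ */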
+
+static const struct xremap_mapping_output *
+xremap_match_mapping(struct xremap_device *xremap,
+ const struct xremap_mapping *mapping)
+{
+ const struct xremap_mapping_output *output;
+
+ if (mapping->width != xremap->config.width ||
+ mapping->num_components != xremap->config.num_s_components)
+ return NULL;
+
+ for (output = mapping->outputs; output->code; ++output) {
+ unsigned int i;
+
+ if (output->num_components != xremap->config.num_m_components)
+ continue;
+
+ for (i = 0; i < output->num_components; ++i) {
+ if (output->component_maps[i] !=
+ xremap->config.component_maps[i])
+ break;
+ }
+
+ if (i == output->num_components)
+ return output;
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xremap_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (code->pad == XREMAP_PAD_SINK) {
+ const struct xremap_mapping *mapping = NULL;
+ unsigned int index = code->index + 1;
+ unsigned int i;
+
+ /* Iterate through the mappings and skip the ones that don't
+ * match the remapper configuration until we reach the requested
+ * index.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings) && index; ++i) {
+ mapping = &xremap_mappings[i];
+
+ if (xremap_match_mapping(xremap, mapping))
+ index--;
+ }
+
+ /* If the index is larger than the number of supported
+ * mappings, return -EINVAL.
+ */
+ if (index > 0)
+ return -EINVAL;
+
+ code->code = mapping->code;
+ } else {
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int xremap_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == XREMAP_PAD_SINK) {
+ /* The remapper doesn't restrict the size on the sink pad. */
+ fse->min_width = XREMAP_MIN_WIDTH;
+ fse->max_width = XREMAP_MAX_WIDTH;
+ fse->min_height = XREMAP_MIN_HEIGHT;
+ fse->max_height = XREMAP_MAX_HEIGHT;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+xremap_get_pad_format(struct xremap_device *xremap,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xremap->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xremap->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xremap_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+
+ fmt->format = *xremap_get_pad_format(xremap, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+static int xremap_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ const struct xremap_mapping_output *output = NULL;
+ const struct xremap_mapping *mapping;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int i;
+
+ format = xremap_get_pad_format(xremap, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XREMAP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ /* Find the mapping. If the requested format has no mapping, use the
+ * default.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings); ++i) {
+ mapping = &xremap_mappings[i];
+ if (mapping->code != fmt->format.code)
+ continue;
+
+ output = xremap_match_mapping(xremap, mapping);
+ if (output)
+ break;
+ }
+
+ if (!output) {
+ mapping = xremap->default_mapping;
+ output = xremap->default_output;
+ }
+
+ format->code = mapping->code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XREMAP_MIN_WIDTH, XREMAP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XREMAP_MIN_HEIGHT, XREMAP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = xremap_get_pad_format(xremap, cfg, XREMAP_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+ format->code = output->code;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/*
+ * xremap_init_formats - Initialize formats on all pads
+ * @subdev: remapper V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static void xremap_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+
+ format.pad = XREMAP_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = xremap->default_mapping->code;
+ format.format.width = XREMAP_DEF_WIDTH;
+ format.format.height = XREMAP_DEF_HEIGHT;
+
+ xremap_set_format(subdev, fh ? fh->pad : NULL, &format);
+}
+
+static int xremap_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ xremap_init_formats(subdev, fh);
+
+ return 0;
+}
+
+static int xremap_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops xremap_core_ops = {
+};
+
+static struct v4l2_subdev_video_ops xremap_video_ops = {
+};
+
+static struct v4l2_subdev_pad_ops xremap_pad_ops = {
+ .enum_mbus_code = xremap_enum_mbus_code,
+ .enum_frame_size = xremap_enum_frame_size,
+ .get_fmt = xremap_get_format,
+ .set_fmt = xremap_set_format,
+};
+
+static struct v4l2_subdev_ops xremap_ops = {
+ .core = &xremap_core_ops,
+ .video = &xremap_video_ops,
+ .pad = &xremap_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xremap_internal_ops = {
+ .open = xremap_open,
+ .close = xremap_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xremap_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xremap_parse_of(struct xremap_device *xremap)
+{
+ struct device_node *node = xremap->xvip.dev->of_node;
+ unsigned int i;
+ int ret;
+
+ /* Parse the DT properties. */
+ ret = of_property_read_u32(node, "xlnx,video-width",
+ &xremap->config.width);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "xlnx,video-width");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,s-components",
+ &xremap->config.num_s_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "#xlnx,s-components");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,m-components",
+ &xremap->config.num_m_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "#xlnx,m-components");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(node, "xlnx,component-maps",
+ xremap->config.component_maps,
+ xremap->config.num_m_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "xlnx,component-maps");
+ return -EINVAL;
+ }
+
+ /* Validate the parsed values. */
+ if (xremap->config.num_s_components > 4 ||
+ xremap->config.num_m_components > 4) {
+ dev_dbg(xremap->xvip.dev,
+ "invalid number of components (s %u m %u)\n",
+ xremap->config.num_s_components,
+ xremap->config.num_m_components);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < xremap->config.num_m_components; ++i) {
+ if (xremap->config.component_maps[i] > 4) {
+ dev_dbg(xremap->xvip.dev, "invalid map %u @%u\n",
+ xremap->config.component_maps[i], i);
+ return -EINVAL;
+ }
+ }
+
+ /* Find the first mapping that matches the remapper configuration and
+ * store it as the default mapping.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings); ++i) {
+ const struct xremap_mapping_output *output;
+ const struct xremap_mapping *mapping;
+
+ mapping = &xremap_mappings[i];
+ output = xremap_match_mapping(xremap, mapping);
+
+ if (output) {
+ xremap->default_mapping = mapping;
+ xremap->default_output = output;
+ return 0;
+ }
+ }
+
+ dev_err(xremap->xvip.dev,
+ "No format compatible with device configuration\n");
+
+ return -EINVAL;
+}
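+/*
+ * Illustrative node matching the only mapping currently in
+ * xremap_mappings[] (a sketch; the node name and clock phandle are
+ * assumptions):
+ *
+ *	remapper@0 {
+ *		compatible = "xlnx,v-remapper";
+ *		clocks = <&clkc 72>;
+ *		xlnx,video-width = <8>;
+ *		#xlnx,s-components = <3>;
+ *		#xlnx,m-components = <4>;
+ *		xlnx,component-maps = <1 0 2 4>;
+ *	};
+ */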
+
+static int xremap_probe(struct platform_device *pdev)
+{
+ struct xremap_device *xremap;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ xremap = devm_kzalloc(&pdev->dev, sizeof(*xremap), GFP_KERNEL);
+ if (!xremap)
+ return -ENOMEM;
+
+ xremap->xvip.dev = &pdev->dev;
+
+ ret = xremap_parse_of(xremap);
+ if (ret < 0)
+ return ret;
+
+ xremap->xvip.clk = devm_clk_get(xremap->xvip.dev, NULL);
+ if (IS_ERR(xremap->xvip.clk))
+ return PTR_ERR(xremap->xvip.clk);
+
+ ret = clk_prepare_enable(xremap->xvip.clk);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xremap->xvip.subdev;
+ v4l2_subdev_init(subdev, &xremap_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xremap_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xremap);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ xremap_init_formats(subdev, NULL);
+
+ xremap->pads[XREMAP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xremap->pads[XREMAP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xremap_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xremap->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xremap);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "device registered\n");
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xremap->xvip.clk);
+ return ret;
+}
+
+static int xremap_remove(struct platform_device *pdev)
+{
+ struct xremap_device *xremap = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xremap->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ clk_disable_unprepare(xremap->xvip.clk);
+
+ return 0;
+}
+
+static const struct of_device_id xremap_of_id_table[] = {
+ { .compatible = "xlnx,v-remapper" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xremap_of_id_table);
+
+static struct platform_driver xremap_driver = {
+ .driver = {
+ .name = "xilinx-remapper",
+ .of_match_table = xremap_of_id_table,
+ },
+ .probe = xremap_probe,
+ .remove = xremap_remove,
+};
+
+module_platform_driver(xremap_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Remapper Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-rgb2yuv.c b/drivers/media/platform/xilinx/xilinx-rgb2yuv.c
new file mode 100644
index 000000000000..20ae95946ca3
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-rgb2yuv.c
@@ -0,0 +1,566 @@
+/*
+ * Xilinx RGB to YUV Converter
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XRGB2YUV_YMAX 0x100
+#define XRGB2YUV_YMIN 0x104
+#define XRGB2YUV_CBMAX 0x108
+#define XRGB2YUV_CBMIN 0x10c
+#define XRGB2YUV_CRMAX 0x110
+#define XRGB2YUV_CRMIN 0x114
+#define XRGB2YUV_YOFFSET 0x118
+#define XRGB2YUV_CBOFFSET 0x11c
+#define XRGB2YUV_CROFFSET 0x120
+#define XRGB2YUV_ACOEF 0x124
+#define XRGB2YUV_BCOEF 0x128
+#define XRGB2YUV_CCOEF 0x12c
+#define XRGB2YUV_DCOEF 0x130
+
+/**
+ * struct xrgb2yuv_device - Xilinx RGB2YUV device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ * @ctrl_handler: control handler
+ */
+struct xrgb2yuv_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+static inline struct xrgb2yuv_device *to_rgb2yuv(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xrgb2yuv_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xrgb2yuv_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+
+ if (!enable) {
+ xvip_stop(&xrgb2yuv->xvip);
+ return 0;
+ }
+
+ xvip_set_frame_size(&xrgb2yuv->xvip, &xrgb2yuv->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xrgb2yuv_get_pad_format(struct xrgb2yuv_device *xrgb2yuv,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xrgb2yuv->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xrgb2yuv->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xrgb2yuv_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+
+ fmt->format = *__xrgb2yuv_get_pad_format(xrgb2yuv, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xrgb2yuv_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xrgb2yuv_get_pad_format(xrgb2yuv, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xrgb2yuv_get_pad_format(xrgb2yuv, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xrgb2yuv_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xrgb2yuv->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xrgb2yuv->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xrgb2yuv_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xrgb2yuv_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xrgb2yuv_device *xrgb2yuv =
+ container_of(ctrl->handler, struct xrgb2yuv_device,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_RGB2YUV_YMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_YMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CRMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CRMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CRMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CRMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_YOFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YOFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBOFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBOFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CROFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CROFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_ACOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_ACOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_BCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_BCOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CCOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_DCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_DCOEF, ctrl->val);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops xrgb2yuv_ctrl_ops = {
+ .s_ctrl = xrgb2yuv_s_ctrl,
+};
+
+static struct v4l2_subdev_video_ops xrgb2yuv_video_ops = {
+ .s_stream = xrgb2yuv_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xrgb2yuv_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xrgb2yuv_get_format,
+ .set_fmt = xrgb2yuv_set_format,
+};
+
+static struct v4l2_subdev_ops xrgb2yuv_ops = {
+ .video = &xrgb2yuv_video_ops,
+ .pad = &xrgb2yuv_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xrgb2yuv_internal_ops = {
+ .open = xrgb2yuv_open,
+ .close = xrgb2yuv_close,
+};
+
+/*
+ * Control Configs
+ */
+
+static struct v4l2_ctrl_config xrgb2yuv_ctrls[] = {
+ {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YMAX,
+ .name = "RGB to YUV: Maximum Y value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YMIN,
+ .name = "RGB to YUV: Minimum Y value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBMAX,
+ .name = "RGB to YUV: Maximum Cb value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBMIN,
+ .name = "RGB to YUV: Minimum Cb value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CRMAX,
+ .name = "RGB to YUV: Maximum Cr value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CRMIN,
+ .name = "RGB to YUV: Minimum Cr value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YOFFSET,
+ .name = "RGB to YUV: Luma offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBOFFSET,
+ .name = "RGB to YUV: Chroma Cb offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CROFFSET,
+ .name = "RGB to YUV: Chroma Cr offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_ACOEF,
+ .name = "RGB to YUV: CA coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_BCOEF,
+ .name = "RGB to YUV: CB coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CCOEF,
+ .name = "RGB to YUV: CC coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_DCOEF,
+ .name = "RGB to YUV: CD coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ },
+};
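+/*
+ * The array order above mirrors the register layout: the probe routine
+ * reads the default value for entry i from XRGB2YUV_YMAX + i * 4, so the
+ * entries must stay in the same order as the 0x100-0x130 register block.
+ */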
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xrgb2yuv_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xrgb2yuv_pm_suspend(struct device *dev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = dev_get_drvdata(dev);
+
+ xvip_suspend(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xrgb2yuv_pm_resume(struct device *dev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = dev_get_drvdata(dev);
+
+ xvip_resume(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xrgb2yuv_parse_of(struct xrgb2yuv_device *xrgb2yuv)
+{
+ struct device *dev = xrgb2yuv->xvip.dev;
+ struct device_node *node = xrgb2yuv->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ return -EINVAL;
+ }
+
+ xrgb2yuv->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ return 0;
+}
+
+static int xrgb2yuv_probe(struct platform_device *pdev)
+{
+ struct xrgb2yuv_device *xrgb2yuv;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ unsigned int i;
+ int ret;
+
+ xrgb2yuv = devm_kzalloc(&pdev->dev, sizeof(*xrgb2yuv), GFP_KERNEL);
+ if (!xrgb2yuv)
+ return -ENOMEM;
+
+ xrgb2yuv->xvip.dev = &pdev->dev;
+
+ ret = xrgb2yuv_parse_of(xrgb2yuv);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xrgb2yuv->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xrgb2yuv->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xrgb2yuv->xvip.subdev;
+ v4l2_subdev_init(subdev, &xrgb2yuv_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xrgb2yuv_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xrgb2yuv);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xrgb2yuv->default_formats[XVIP_PAD_SINK];
+ default_format->code = xrgb2yuv->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xrgb2yuv->xvip, default_format);
+
+ xrgb2yuv->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xrgb2yuv->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xrgb2yuv->default_formats[XVIP_PAD_SINK];
+ default_format->code = xrgb2yuv->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xrgb2yuv->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xrgb2yuv->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xrgb2yuv->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xrgb2yuv_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xrgb2yuv->pads);
+ if (ret < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&xrgb2yuv->ctrl_handler, 13);
+
+ for (i = 0; i < ARRAY_SIZE(xrgb2yuv_ctrls); i++) {
+ xrgb2yuv_ctrls[i].def = xvip_read(&xrgb2yuv->xvip,
+ XRGB2YUV_YMAX + i * 4);
+ v4l2_ctrl_new_custom(&xrgb2yuv->ctrl_handler,
+ &xrgb2yuv_ctrls[i], NULL);
+ }
+
+ if (xrgb2yuv->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xrgb2yuv->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xrgb2yuv->ctrl_handler;
+
+ platform_set_drvdata(pdev, xrgb2yuv);
+
+ xvip_print_version(&xrgb2yuv->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xrgb2yuv->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xrgb2yuv->xvip);
+ return ret;
+}
+
+static int xrgb2yuv_remove(struct platform_device *pdev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xrgb2yuv->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xrgb2yuv->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xrgb2yuv_pm_ops, xrgb2yuv_pm_suspend,
+ xrgb2yuv_pm_resume);
+
+static const struct of_device_id xrgb2yuv_of_id_table[] = {
+ { .compatible = "xlnx,v-rgb2yuv-7.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xrgb2yuv_of_id_table);
+
+static struct platform_driver xrgb2yuv_driver = {
+ .driver = {
+ .name = "xilinx-rgb2yuv",
+ .pm = &xrgb2yuv_pm_ops,
+ .of_match_table = xrgb2yuv_of_id_table,
+ },
+ .probe = xrgb2yuv_probe,
+ .remove = xrgb2yuv_remove,
+};
+
+module_platform_driver(xrgb2yuv_driver);
+
+MODULE_DESCRIPTION("Xilinx RGB to YUV Converter Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scaler.c b/drivers/media/platform/xilinx/xilinx-scaler.c
new file mode 100644
index 000000000000..bb0d52627a50
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scaler.c
@@ -0,0 +1,708 @@
+/*
+ * Xilinx Scaler
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fixp-arith.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XSCALER_MIN_WIDTH 32
+#define XSCALER_MAX_WIDTH 4096
+#define XSCALER_MIN_HEIGHT 32
+#define XSCALER_MAX_HEIGHT 4096
+
+#define XSCALER_HSF 0x0100
+#define XSCALER_VSF 0x0104
+#define XSCALER_SF_SHIFT 20
+#define XSCALER_SF_MASK 0xffffff
+#define XSCALER_SOURCE_SIZE 0x0108
+#define XSCALER_SIZE_HORZ_SHIFT 0
+#define XSCALER_SIZE_VERT_SHIFT 16
+#define XSCALER_SIZE_MASK 0xfff
+#define XSCALER_HAPERTURE 0x010c
+#define XSCALER_VAPERTURE 0x0110
+#define XSCALER_APERTURE_START_SHIFT 0
+#define XSCALER_APERTURE_END_SHIFT 16
+#define XSCALER_OUTPUT_SIZE 0x0114
+#define XSCALER_COEF_DATA_IN 0x0134
+#define XSCALER_COEF_DATA_IN_SHIFT 16
+
+/* Fixed point operations */
+#define FRAC_N 8
+
+static inline s16 fixp_new(s16 a)
+{
+ return a << FRAC_N;
+}
+
+static inline s16 fixp_mult(s16 a, s16 b)
+{
+ return ((s32)(a * b)) >> FRAC_N;
+}
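+/*
+ * Worked example for the helpers above: with FRAC_N = 8, fixp_new(3) is
+ * 0x0300 and fixp_new(2) is 0x0200, so fixp_mult(fixp_new(3), fixp_new(2))
+ * evaluates to (0x0300 * 0x0200) >> 8 = 0x0600, i.e. fixp_new(6). The s16
+ * result leaves only 7 integer bits, so operands must stay small.
+ */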
+
+/**
+ * struct xscaler_device - Xilinx Scaler device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_format: Xilinx Video IP format
+ * @crop: Active crop rectangle for the sink pad
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @separate_yc_coef: separate coefficients for Luma(y) and Chroma(c)
+ * @separate_hv_coef: separate coefficients for Horizontal(h) and Vertical(v)
+ */
+struct xscaler_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_format;
+ struct v4l2_rect crop;
+
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ bool separate_yc_coef;
+ bool separate_hv_coef;
+};
+
+static inline struct xscaler_device *to_scaler(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscaler_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+/**
+ * lanczos - evaluate the Lanczos resampling kernel at a given phase
+ * @x: phase
+ * @a: Lanczos kernel size
+ *
+ * Return: the coefficient value in fixed point format.
+ */
+static s16 lanczos(s16 x, s16 a)
+{
+ s16 pi;
+ s16 numerator;
+ s16 denominator;
+ s16 temp;
+
+ if (x < -a || x > a)
+ return 0;
+ else if (x == 0)
+ return fixp_new(1);
+
+ /* a * sin(pi * x) * sin(pi * x / a) / (pi * pi * x * x) */
+
+ pi = (fixp_new(157) << FRAC_N) / fixp_new(50);
+
+ if (x < 0)
+ x = -x;
+
+ /* sin(pi * x) */
+ temp = fixp_mult(fixp_new(180), x);
+ temp = fixp_sin16(temp >> FRAC_N);
+
+ /* a * sin(pi * x) */
+ numerator = fixp_mult(temp, a);
+
+ /* sin(pi * x / a) */
+ temp = (fixp_mult(fixp_new(180), x) << FRAC_N) / a;
+ temp = fixp_sin16(temp >> FRAC_N);
+
+ /* a * sin(pi * x) * sin(pi * x / a) */
+ numerator = fixp_mult(temp, numerator);
+
+ /* pi * pi * x * x */
+ denominator = fixp_mult(pi, pi);
+ temp = fixp_mult(x, x);
+ denominator = fixp_mult(temp, denominator);
+
+ return (numerator << FRAC_N) / denominator;
+}
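+/*
+ * Quick sanity checks on the kernel above: lanczos(0, a) returns
+ * fixp_new(1) (0x0100 with FRAC_N = 8) and the window is zero outside
+ * [-a, a]; normalization to a unity sum is done per phase by the caller.
+ */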
+
+/**
+ * xscaler_set_coefs - generate and program the coefficient table
+ * @xscaler: scaler device
+ * @taps: maximum coefficient tap index
+ *
+ * Generate the coefficient table using Lanczos resampling, and program
+ * generated coefficients to the scaler. The generated coefficients are
+ * supposed to work regardless of resolutions.
+ *
+ * Return: 0 if the coefficient table is programmed, and -ENOMEM if memory
+ * allocation for the table fails.
+ */
+static int xscaler_set_coefs(struct xscaler_device *xscaler, s16 taps)
+{
+ s16 *coef;
+ s16 dy;
+ u32 coef_val;
+ u16 phases = xscaler->max_num_phases;
+ u16 i;
+ u16 j;
+
+ coef = kcalloc(phases, sizeof(*coef), GFP_KERNEL);
+ if (!coef)
+ return -ENOMEM;
+
+ for (i = 0; i < phases; i++) {
+ s16 sum = 0;
+
+ dy = ((fixp_new(i) << FRAC_N) / fixp_new(phases));
+
+ /* Generate Lanczos coefficients */
+ for (j = 0; j < taps; j++) {
+ coef[j] = lanczos(fixp_new(j - (taps >> 1)) + dy,
+ fixp_new(taps >> 1));
+ sum += coef[j];
+ }
+
+ /* Program coefficients */
+ for (j = 0; j < taps; j += 2) {
+ /* Normalize and multiply coefficients */
+ coef_val = (((coef[j] << FRAC_N) << (FRAC_N - 2)) /
+ sum) & 0xffff;
+ if (j + 1 < taps)
+ coef_val |= ((((coef[j + 1] << FRAC_N) <<
+ (FRAC_N - 2)) / sum) & 0xffff) <<
+ 16;
+
+ xvip_write(&xscaler->xvip, XSCALER_COEF_DATA_IN,
+ coef_val);
+ }
+ }
+
+ kfree(coef);
+
+ return 0;
+}
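+/*
+ * Packing note for the loop above (an inference from the code, not from IP
+ * documentation): each 32-bit write to XSCALER_COEF_DATA_IN carries two
+ * normalized taps, coef[j] in bits [15:0] and coef[j + 1] in bits [31:16],
+ * so a 6-tap phase takes three writes.
+ */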
+
+static void xscaler_set_aperture(struct xscaler_device *xscaler)
+{
+ u16 start;
+ u16 end;
+ u32 scale_factor;
+
+ xvip_disable_reg_update(&xscaler->xvip);
+
+ /* set horizontal aperture */
+ start = xscaler->crop.left;
+ end = start + xscaler->crop.width - 1;
+ xvip_write(&xscaler->xvip, XSCALER_HAPERTURE,
+ (end << XSCALER_APERTURE_END_SHIFT) |
+ (start << XSCALER_APERTURE_START_SHIFT));
+
+ /* set vertical aperture */
+ start = xscaler->crop.top;
+ end = start + xscaler->crop.height - 1;
+ xvip_write(&xscaler->xvip, XSCALER_VAPERTURE,
+ (end << XSCALER_APERTURE_END_SHIFT) |
+ (start << XSCALER_APERTURE_START_SHIFT));
+
+ /* set scaling factors */
+ scale_factor = ((xscaler->crop.width << XSCALER_SF_SHIFT) /
+ xscaler->formats[XVIP_PAD_SOURCE].width) &
+ XSCALER_SF_MASK;
+ xvip_write(&xscaler->xvip, XSCALER_HSF, scale_factor);
+
+ scale_factor = ((xscaler->crop.height << XSCALER_SF_SHIFT) /
+ xscaler->formats[XVIP_PAD_SOURCE].height) &
+ XSCALER_SF_MASK;
+ xvip_write(&xscaler->xvip, XSCALER_VSF, scale_factor);
+
+ xvip_enable_reg_update(&xscaler->xvip);
+}
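+/*
+ * Worked example for the scale factor math above: with XSCALER_SF_SHIFT of
+ * 20, scaling a 1920-pixel-wide crop down to a 1280-pixel output programs
+ * HSF = (1920 << 20) / 1280 = 0x180000, i.e. 1.5 in the 24-bit,
+ * 20-fractional-bit format implied by XSCALER_SF_MASK.
+ */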
+
+static int xscaler_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ u32 width;
+ u32 height;
+
+ if (!enable) {
+ xvip_stop(&xscaler->xvip);
+ return 0;
+ }
+
+ /* set input width / height */
+ width = xscaler->formats[XVIP_PAD_SINK].width;
+ height = xscaler->formats[XVIP_PAD_SINK].height;
+ xvip_write(&xscaler->xvip, XSCALER_SOURCE_SIZE,
+ (height << XSCALER_SIZE_VERT_SHIFT) |
+ (width << XSCALER_SIZE_HORZ_SHIFT));
+
+ /* set output width / height */
+ width = xscaler->formats[XVIP_PAD_SOURCE].width;
+ height = xscaler->formats[XVIP_PAD_SOURCE].height;
+ xvip_write(&xscaler->xvip, XSCALER_OUTPUT_SIZE,
+ (height << XSCALER_SIZE_VERT_SHIFT) |
+ (width << XSCALER_SIZE_HORZ_SHIFT));
+
+ /* set aperture */
+ xscaler_set_aperture(xscaler);
+
+ xvip_start(&xscaler->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xscaler_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ fse->min_width = XSCALER_MIN_WIDTH;
+ fse->max_width = XSCALER_MAX_WIDTH;
+ fse->min_height = XSCALER_MIN_HEIGHT;
+ fse->max_height = XSCALER_MAX_HEIGHT;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xscaler_get_pad_format(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xscaler->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static struct v4l2_rect *__xscaler_get_crop(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&xscaler->xvip.subdev, cfg,
+ XVIP_PAD_SINK);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->crop;
+ default:
+ return NULL;
+ }
+}
+
+static int xscaler_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ fmt->format = *__xscaler_get_pad_format(xscaler, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static void xscaler_try_crop(const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ crop->left = min_t(u32, crop->left, sink->width - XSCALER_MIN_WIDTH);
+ crop->top = min_t(u32, crop->top, sink->height - XSCALER_MIN_HEIGHT);
+ crop->width = clamp_t(u32, crop->width, XSCALER_MIN_WIDTH,
+ sink->width - crop->left);
+ crop->height = clamp_t(u32, crop->height, XSCALER_MIN_HEIGHT,
+ sink->height - crop->top);
+}
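+/*
+ * Example of the clamping above: with a 1920x1080 sink format, a requested
+ * crop of left = 1900, width = 200 is first limited to left = 1888
+ * (1920 - XSCALER_MIN_WIDTH) and then to width = 32, keeping the rectangle
+ * inside the frame.
+ */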
+
+static int xscaler_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCALER_MIN_WIDTH, XSCALER_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCALER_MIN_HEIGHT, XSCALER_MAX_HEIGHT);
+
+ fmt->format = *format;
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ /* Set the crop rectangle to the full frame */
+ crop = __xscaler_get_crop(xscaler, cfg, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ }
+
+ return 0;
+}
+
+static int xscaler_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != XVIP_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = __xscaler_get_pad_format(xscaler, cfg, XVIP_PAD_SINK,
+ sel->which);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__xscaler_get_crop(xscaler, cfg, sel->which);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int xscaler_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP) || (sel->pad != XVIP_PAD_SINK))
+ return -EINVAL;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, XVIP_PAD_SINK,
+ sel->which);
+ xscaler_try_crop(format, &sel->r);
+ *__xscaler_get_crop(xscaler, cfg, sel->which) = sel->r;
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xscaler_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xscaler->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xscaler->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xscaler_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xscaler_video_ops = {
+ .s_stream = xscaler_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscaler_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xscaler_enum_frame_size,
+ .get_fmt = xscaler_get_format,
+ .set_fmt = xscaler_set_format,
+ .get_selection = xscaler_get_selection,
+ .set_selection = xscaler_set_selection,
+};
+
+static struct v4l2_subdev_ops xscaler_ops = {
+ .video = &xscaler_video_ops,
+ .pad = &xscaler_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscaler_internal_ops = {
+ .open = xscaler_open,
+ .close = xscaler_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscaler_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xscaler_pm_suspend(struct device *dev)
+{
+ struct xscaler_device *xscaler = dev_get_drvdata(dev);
+
+ xvip_suspend(&xscaler->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xscaler_pm_resume(struct device *dev)
+{
+ struct xscaler_device *xscaler = dev_get_drvdata(dev);
+
+ xvip_resume(&xscaler->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+static int xscaler_parse_of(struct xscaler_device *xscaler)
+{
+ struct device *dev = xscaler->xvip.dev;
+ struct device_node *node = xscaler->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ if (!xscaler->vip_format) {
+ xscaler->vip_format = vip_format;
+ } else if (xscaler->vip_format != vip_format) {
+ dev_err(dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-hori-taps",
+ &xscaler->num_hori_taps);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "xlnx,num-vert-taps",
+ &xscaler->num_vert_taps);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "xlnx,max-num-phases",
+ &xscaler->max_num_phases);
+ if (ret < 0)
+ return ret;
+
+ xscaler->separate_yc_coef =
+ of_property_read_bool(node, "xlnx,separate-yc-coef");
+
+ xscaler->separate_hv_coef =
+ of_property_read_bool(node, "xlnx,separate-hv-coef");
+
+ return 0;
+}
+
+static int xscaler_probe(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ u32 size;
+ int ret;
+
+ xscaler = devm_kzalloc(&pdev->dev, sizeof(*xscaler), GFP_KERNEL);
+ if (!xscaler)
+ return -ENOMEM;
+
+ xscaler->xvip.dev = &pdev->dev;
+
+ ret = xscaler_parse_of(xscaler);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xscaler->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xscaler->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xscaler->xvip.subdev;
+ v4l2_subdev_init(subdev, &xscaler_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xscaler_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xscaler);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_format->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ size = xvip_read(&xscaler->xvip, XSCALER_SOURCE_SIZE);
+ default_format->width = (size >> XSCALER_SIZE_HORZ_SHIFT) &
+ XSCALER_SIZE_MASK;
+ default_format->height = (size >> XSCALER_SIZE_VERT_SHIFT) &
+ XSCALER_SIZE_MASK;
+
+ xscaler->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xscaler->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xscaler->default_formats[XVIP_PAD_SINK];
+ size = xvip_read(&xscaler->xvip, XSCALER_OUTPUT_SIZE);
+ default_format->width = (size >> XSCALER_SIZE_HORZ_SHIFT) &
+ XSCALER_SIZE_MASK;
+ default_format->height = (size >> XSCALER_SIZE_VERT_SHIFT) &
+ XSCALER_SIZE_MASK;
+
+ xscaler->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xscaler->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xscaler->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xscaler_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xscaler->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xscaler);
+
+ xvip_print_version(&xscaler->xvip);
+
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_hori_taps);
+ if (ret < 0)
+ goto error;
+
+ if (xscaler->separate_hv_coef) {
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_vert_taps);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (xscaler->separate_yc_coef) {
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_hori_taps);
+ if (ret < 0)
+ goto error;
+
+ if (xscaler->separate_hv_coef) {
+ ret = xscaler_set_coefs(xscaler,
+ (s16)xscaler->num_vert_taps);
+ if (ret < 0)
+ goto error;
+ }
+ }
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xscaler->xvip);
+ return ret;
+}
+
+static int xscaler_remove(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xscaler->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xscaler->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xscaler_pm_ops, xscaler_pm_suspend, xscaler_pm_resume);
+
+static const struct of_device_id xscaler_of_id_table[] = {
+ { .compatible = "xlnx,v-scaler-8.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xscaler_of_id_table);
+
+static struct platform_driver xscaler_driver = {
+ .driver = {
+ .name = "xilinx-scaler",
+ .of_match_table = xscaler_of_id_table,
+ },
+ .probe = xscaler_probe,
+ .remove = xscaler_remove,
+};
+
+module_platform_driver(xscaler_driver);
+
+MODULE_DESCRIPTION("Xilinx Scaler Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange-channel.c b/drivers/media/platform/xilinx/xilinx-scenechange-channel.c
new file mode 100644
index 000000000000..852191ac6500
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange-channel.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/of.h>
+#include <linux/xilinx-v4l2-events.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-scenechange.h"
+#include "xilinx-vip.h"
+
+#define XSCD_MAX_WIDTH 3840
+#define XSCD_MAX_HEIGHT 2160
+#define XSCD_MIN_WIDTH 640
+#define XSCD_MIN_HEIGHT 480
+
+#define XSCD_V_SUBSAMPLING 16
+#define XSCD_BYTE_ALIGN 16
+#define MULTIPLICATION_FACTOR 100
+#define SCENE_CHANGE_THRESHOLD 0.5
+
+#define XSCD_SCENE_CHANGE 1
+#define XSCD_NO_SCENE_CHANGE 0
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xscd_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return 0;
+}
+
+static int xscd_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xscd_get_pad_format(struct xscd_chan *chan,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&chan->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &chan->format;
+ default:
+ return NULL;
+ }
+}
+
+static int xscd_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+
+ fmt->format = *__xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xscd_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCD_MIN_WIDTH, XSCD_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCD_MIN_HEIGHT, XSCD_MAX_HEIGHT);
+ format->code = fmt->format.code;
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xscd_chan_get_vid_fmt(u32 media_bus_fmt, bool memory_based)
+{
+ u32 vid_fmt;
+
+ if (memory_based) {
+ switch (media_bus_fmt) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ vid_fmt = XSCD_VID_FMT_Y8;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ vid_fmt = XSCD_VID_FMT_Y10;
+ break;
+ default:
+ vid_fmt = XSCD_VID_FMT_Y8;
+ }
+
+ return vid_fmt;
+ }
+
+ /* Streaming based */
+ switch (media_bus_fmt) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ vid_fmt = XSCD_VID_FMT_YUV_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ vid_fmt = XSCD_VID_FMT_YUV_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ vid_fmt = XSCD_VID_FMT_YUV_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ vid_fmt = XSCD_VID_FMT_RGB;
+ break;
+ default:
+ vid_fmt = XSCD_VID_FMT_YUV_420;
+ }
+
+ return vid_fmt;
+}
+
+/**
+ * xscd_chan_configure_params - Program parameters to HW registers
+ * @chan: Driver specific channel struct pointer
+ */
+static void xscd_chan_configure_params(struct xscd_chan *chan)
+{
+ u32 vid_fmt, stride;
+
+ xscd_write(chan->iomem, XSCD_WIDTH_OFFSET, chan->format.width);
+
+ /* Stride is required only for memory based IP, not for streaming IP */
+ if (chan->xscd->memory_based) {
+ stride = roundup(chan->format.width, XSCD_BYTE_ALIGN);
+ xscd_write(chan->iomem, XSCD_STRIDE_OFFSET, stride);
+ }
+
+ xscd_write(chan->iomem, XSCD_HEIGHT_OFFSET, chan->format.height);
+
+ /* Hardware video format */
+ vid_fmt = xscd_chan_get_vid_fmt(chan->format.code,
+ chan->xscd->memory_based);
+ xscd_write(chan->iomem, XSCD_VID_FMT_OFFSET, vid_fmt);
+
+ /*
+ * This is the vertical subsampling factor of the input image. Instead
+ * of sampling every line to calculate the histogram, IP uses this
+ * register value to sample only specific lines of the frame.
+ */
+ xscd_write(chan->iomem, XSCD_SUBSAMPLE_OFFSET, XSCD_V_SUBSAMPLING);
+}
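+
+/*
+ * Worked example (illustrative values): with XSCD_BYTE_ALIGN = 16, a
+ * 1912-pixel-wide frame is padded up to the next 16-byte boundary,
+ *
+ * stride = roundup(1912, XSCD_BYTE_ALIGN), i.e. 1920,
+ *
+ * while an already aligned width such as 1920 is left unchanged. Only the
+ * memory-based IP consumes the stride register, as noted above.
+ */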
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+static int xscd_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+
+ if (enable)
+ xscd_chan_configure_params(chan);
+
+ xscd_dma_enable_channel(&chan->dmachan, enable);
+ return 0;
+}
+
+static int xscd_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xscd_chan *chan = to_xscd_chan(sd);
+
+ mutex_lock(&chan->lock);
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXSCD:
+ ret = v4l2_event_subscribe(fh, sub, 1, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&chan->lock);
+
+ return ret;
+}
+
+static int xscd_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xscd_chan *chan = to_xscd_chan(sd);
+
+ mutex_lock(&chan->lock);
+ ret = v4l2_event_unsubscribe(fh, sub);
+ mutex_unlock(&chan->lock);
+
+ return ret;
+}
+
+static int xscd_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xscd_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops xscd_core_ops = {
+ .subscribe_event = xscd_subscribe_event,
+ .unsubscribe_event = xscd_unsubscribe_event
+};
+
+static const struct v4l2_subdev_video_ops xscd_video_ops = {
+ .s_stream = xscd_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops xscd_pad_ops = {
+ .enum_mbus_code = xscd_enum_mbus_code,
+ .enum_frame_size = xscd_enum_frame_size,
+ .get_fmt = xscd_get_format,
+ .set_fmt = xscd_set_format,
+};
+
+static const struct v4l2_subdev_ops xscd_ops = {
+ .core = &xscd_core_ops,
+ .video = &xscd_video_ops,
+ .pad = &xscd_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscd_internal_ops = {
+ .open = xscd_open,
+ .close = xscd_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscd_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void xscd_chan_event_notify(struct xscd_chan *chan)
+{
+ u32 *eventdata;
+ u32 sad, scd_threshold;
+
+ sad = xscd_read(chan->iomem, XSCD_SAD_OFFSET);
+ sad = (sad * XSCD_V_SUBSAMPLING * MULTIPLICATION_FACTOR) /
+ (chan->format.width * chan->format.height);
+ eventdata = (u32 *)&chan->event.u.data;
+ scd_threshold = SCENE_CHANGE_THRESHOLD * MULTIPLICATION_FACTOR;
+
+ if (sad > scd_threshold)
+ eventdata[0] = XSCD_SCENE_CHANGE;
+ else
+ eventdata[0] = XSCD_NO_SCENE_CHANGE;
+
+ chan->event.type = V4L2_EVENT_XLNXSCD;
+ v4l2_subdev_notify_event(&chan->subdev, &chan->event);
+}
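+
+/*
+ * Worked example (illustrative numbers): the IP reports a raw sum of
+ * absolute differences over the subsampled lines. For a 3840x2160 frame
+ * with XSCD_V_SUBSAMPLING = 16, a raw SAD of 50000 normalizes to
+ *
+ * (50000 * 16 * 100) / (3840 * 2160) = 9
+ *
+ * which is below the threshold of 0.5 * 100 = 50, so XSCD_NO_SCENE_CHANGE
+ * is reported. Scaling both sides by MULTIPLICATION_FACTOR keeps the
+ * comparison in integer arithmetic, since floating point cannot be used
+ * at runtime in kernel code.
+ */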
+
+/**
+ * xscd_chan_init - Initialize the V4L2 subdev for a channel
+ * @xscd: Pointer to the SCD device structure
+ * @chan_id: Channel id
+ * @node: device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xscd_chan_init(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node)
+{
+ struct xscd_chan *chan = &xscd->chans[chan_id];
+ struct v4l2_subdev *subdev;
+ unsigned int num_pads;
+ int ret;
+
+ mutex_init(&chan->lock);
+ chan->xscd = xscd;
+ chan->id = chan_id;
+ chan->iomem = chan->xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &chan->subdev;
+ v4l2_subdev_init(subdev, &xscd_ops);
+ subdev->dev = chan->xscd->dev;
+ subdev->fwnode = of_fwnode_handle(node);
+ subdev->internal_ops = &xscd_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "xlnx-scdchan.%u",
+ chan_id);
+ v4l2_set_subdevdata(subdev, chan);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Initialize default format */
+ chan->format.code = MEDIA_BUS_FMT_VYYUYY8_1X24;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.width = XSCD_MAX_WIDTH;
+ chan->format.height = XSCD_MAX_HEIGHT;
+
+ /* Initialize media pads */
+ num_pads = xscd->memory_based ? 1 : 2;
+
+ chan->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ if (!xscd->memory_based)
+ chan->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, num_pads, chan->pads);
+ if (ret < 0)
+ goto error;
+
+ subdev->entity.ops = &xscd_media_ops;
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(chan->xscd->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(chan->xscd->dev, "Scene change detection channel found!\n");
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ return ret;
+}
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange-dma.c b/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
new file mode 100644
index 000000000000..58437a769605
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection DMA driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/of_dma.h>
+#include <linux/slab.h>
+
+#include "../../../dma/dmaengine.h"
+
+#include "xilinx-scenechange.h"
+
+/**
+ * xscd_dma_start - Start the SCD core
+ * @xscd: The SCD device
+ * @channels: Bitmask of enabled channels
+ */
+static void xscd_dma_start(struct xscd_device *xscd, unsigned int channels)
+{
+ xscd_write(xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
+ xscd_write(xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
+ xscd_write(xscd->iomem, XSCD_CHAN_EN_OFFSET, channels);
+
+ xscd_set(xscd->iomem, XSCD_CTRL_OFFSET,
+ xscd->memory_based ? XSCD_CTRL_AP_START
+ : XSCD_CTRL_AP_START |
+ XSCD_CTRL_AUTO_RESTART);
+
+ xscd->running = true;
+}
+
+/**
+ * xscd_dma_stop - Stop the SCD core
+ * @xscd: The SCD device
+ */
+static void xscd_dma_stop(struct xscd_device *xscd)
+{
+ xscd_clr(xscd->iomem, XSCD_CTRL_OFFSET,
+ xscd->memory_based ? XSCD_CTRL_AP_START
+ : XSCD_CTRL_AP_START |
+ XSCD_CTRL_AUTO_RESTART);
+
+ xscd->running = false;
+}
+
+/**
+ * xscd_dma_setup_channel - Setup a channel for transfer
+ * @chan: Driver specific channel struct pointer
+ *
+ * Return: 1 if the channel starts to run for a new transfer. Otherwise, 0.
+ */
+static int xscd_dma_setup_channel(struct xscd_dma_chan *chan)
+{
+ struct xscd_dma_tx_descriptor *desc;
+
+ if (!chan->enabled)
+ return 0;
+
+ if (list_empty(&chan->pending_list))
+ return 0;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xscd_dma_tx_descriptor, node);
+ list_del(&desc->node);
+
+ xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);
+ chan->active_desc = desc;
+
+ return 1;
+}
+
+/**
+ * xscd_dma_kick - Start a run of the SCD core if channels are ready
+ * @xscd: The SCD device
+ *
+ * This function starts a single run of the SCD core when all the following
+ * conditions are met:
+ *
+ * - The SCD is not currently running
+ * - At least one channel is enabled and has buffers available
+ *
+ * It can be used to start the SCD when a buffer is queued, when a channel
+ * starts streaming, or to start the next run. Calling this function is only
+ * valid for memory-based mode and is not permitted for stream-based mode.
+ *
+ * The running state for all channels is updated. Channels that are being
+ * stopped are signalled through the channel wait queue.
+ *
+ * The function must be called with the xscd_device lock held.
+ */
+static void xscd_dma_kick(struct xscd_device *xscd)
+{
+ unsigned int channels = 0;
+ unsigned int i;
+
+ lockdep_assert_held(&xscd->lock);
+
+ if (xscd->running)
+ return;
+
+ for (i = 0; i < xscd->num_streams; i++) {
+ struct xscd_dma_chan *chan = xscd->channels[i];
+ unsigned long flags;
+ unsigned int running;
+ bool stopped;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ running = xscd_dma_setup_channel(chan);
+ stopped = chan->running && !running;
+ chan->running = running;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ channels |= running << chan->id;
+ if (stopped)
+ wake_up(&chan->wait);
+ }
+
+ if (channels)
+ xscd_dma_start(xscd, channels);
+ else
+ xscd_dma_stop(xscd);
+}
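+
+/*
+ * Caller sketch: xscd_dma_kick() expects xscd->lock to be held, so callers
+ * follow the pattern
+ *
+ * spin_lock_irq(&xscd->lock);
+ * xscd_dma_kick(xscd);
+ * spin_unlock_irq(&xscd->lock);
+ *
+ * as xscd_dma_enable_channel() and xscd_dma_issue_pending() do below (the
+ * latter with the irqsave variant).
+ */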
+
+/**
+ * xscd_dma_enable_channel - Enable/disable a channel
+ * @chan: Driver specific channel struct pointer
+ * @enable: True to enable the channel, false to disable it
+ *
+ * This function enables or disables a channel. When operating in memory-based
+ * mode, enabling a channel kicks processing if buffers are available for any
+ * enabled channel and the SCD core is idle. When operating in stream-based
+ * mode, the SCD core is started or stopped synchronously when the channel is
+ * enabled or disabled.
+ *
+ * This function must be called in non-atomic, non-interrupt context.
+ */
+void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable)
+{
+ struct xscd_device *xscd = chan->xscd;
+
+ if (enable) {
+ /*
+ * FIXME: Don't set chan->enabled to false here, it will be
+ * done in xscd_dma_terminate_all(). This works around a bug
+ * introduced in commit 2e77607047c6 ("xilinx: v4l2: dma: Add
+ * multiple output support") that stops all channels when the
+ * first one is stopped, even though they are part of
+ * independent pipelines. This workaround should be safe as
+ * long as dmaengine_terminate_all() is called after
+ * xvip_pipeline_set_stream().
+ */
+ spin_lock_irq(&chan->lock);
+ chan->enabled = true;
+ spin_unlock_irq(&chan->lock);
+ }
+
+ if (xscd->memory_based) {
+ if (enable) {
+ spin_lock_irq(&xscd->lock);
+ xscd_dma_kick(xscd);
+ spin_unlock_irq(&xscd->lock);
+ }
+ } else {
+ if (enable)
+ xscd_dma_start(xscd, BIT(chan->id));
+ else
+ xscd_dma_stop(xscd);
+ }
+}
+
+/**
+ * xscd_dma_irq_handler - SCD DMA interrupt handler
+ * @xscd: Pointer to the SCD device structure
+ */
+void xscd_dma_irq_handler(struct xscd_device *xscd)
+{
+ unsigned int i;
+
+ /*
+ * Mark the active descriptors as complete, move them to the done list
+ * and schedule the tasklet to clean them up.
+ */
+ for (i = 0; i < xscd->num_streams; ++i) {
+ struct xscd_dma_chan *chan = xscd->channels[i];
+ struct xscd_dma_tx_descriptor *desc = chan->active_desc;
+
+ if (!desc)
+ continue;
+
+ dma_cookie_complete(&desc->async_tx);
+ xscd_chan_event_notify(&xscd->chans[i]);
+
+ spin_lock(&chan->lock);
+ list_add_tail(&desc->node, &chan->done_list);
+ chan->active_desc = NULL;
+ spin_unlock(&chan->lock);
+
+ tasklet_schedule(&chan->tasklet);
+ }
+
+ /* Start the next run, if any. */
+ spin_lock(&xscd->lock);
+ xscd->running = false;
+ xscd_dma_kick(xscd);
+ spin_unlock(&xscd->lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Engine
+ */
+
+/**
+ * xscd_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &chan->pending_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xscd_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific dma channel
+ * @list: List of descriptors to free
+ */
+static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xscd_dma_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+}
+
+/**
+ * xscd_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xscd_dma_free_desc_list(chan, &chan->pending_list);
+ xscd_dma_free_desc_list(chan, &chan->done_list);
+ kfree(chan->active_desc);
+
+ chan->active_desc = NULL;
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xscd_dma_chan_desc_cleanup - Clean up completed channel descriptors
+ * @chan: Driver specific dma channel
+ */
+static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
+{
+ struct xscd_dma_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ kfree(desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xscd_dma_prep_interleaved - Prepare a descriptor for an interleaved
+ * DMA transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xscd_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+ struct xscd_dma_tx_descriptor *desc;
+ struct xscd_dma_desc *sw;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xscd_dma_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ sw = &desc->sw;
+ sw->vsize = xt->numf;
+ sw->hsize = xt->sgl[0].size;
+ sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
+ sw->luma_plane_addr = xt->src_start;
+
+ return &desc->async_tx;
+}
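+
+/*
+ * Client sketch (hypothetical values): a capture driver describing one
+ * 1920x1080 luma buffer with a 2048-byte line stride would prepare the
+ * interleaved template as
+ *
+ * struct dma_interleaved_template *xt;
+ *
+ * xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
+ * xt->src_start = luma_dma_addr;
+ * xt->numf = 1080; // lines, becomes sw.vsize
+ * xt->sgl[0].size = 1920; // bytes per line, becomes sw.hsize
+ * xt->sgl[0].icg = 128; // inter-line gap, so sw.stride = 2048
+ * tx = dmaengine_prep_interleaved_dma(dchan, xt, flags);
+ *
+ * Only src_start, numf and sgl[0] are consumed here; the remaining
+ * template fields are ignored by this driver.
+ */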
+
+static bool xscd_dma_is_running(struct xscd_dma_chan *chan)
+{
+ bool running;
+
+ spin_lock_irq(&chan->lock);
+ running = chan->running;
+ spin_unlock_irq(&chan->lock);
+
+ return running;
+}
+
+/**
+ * xscd_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific dma channel pointer
+ *
+ * Return: 0
+ */
+static int xscd_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+ int ret;
+
+ spin_lock_irq(&chan->lock);
+ chan->enabled = false;
+ spin_unlock_irq(&chan->lock);
+
+ /* Wait for any on-going transfer to complete. */
+ ret = wait_event_timeout(chan->wait, !xscd_dma_is_running(chan),
+ msecs_to_jiffies(100));
+ WARN_ON(ret == 0);
+
+ xscd_dma_free_descriptors(chan);
+ return 0;
+}
+
+/**
+ * xscd_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xscd_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+ struct xscd_device *xscd = chan->xscd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xscd->lock, flags);
+ xscd_dma_kick(xscd);
+ spin_unlock_irqrestore(&xscd->lock, flags);
+}
+
+static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xscd_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+
+ xscd_dma_free_descriptors(chan);
+}
+
+/**
+ * xscd_dma_do_tasklet - Per-channel tasklet to clean up completed descriptors
+ * @data: Pointer to the Xilinx scdma channel structure
+ */
+static void xscd_dma_do_tasklet(unsigned long data)
+{
+ struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;
+
+ xscd_dma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xscd_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ dma_cookie_init(dchan);
+ return 0;
+}
+
+/**
+ * of_scdma_xilinx_xlate - Translate a DT DMA specifier to a DMA channel
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xscd_device *xscd = ofdma->of_dma_data;
+ u32 chan_id = dma_spec->args[0];
+
+ if (chan_id >= xscd->num_streams)
+ return NULL;
+
+ if (!xscd->channels[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xscd->channels[chan_id]->common);
+}
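+
+/*
+ * Consumer sketch (the "scd" channel name is assumed, not mandated by the
+ * driver): a device whose "dmas" property points at this controller
+ * requests its channel with
+ *
+ * struct dma_chan *chan = dma_request_chan(client_dev, "scd");
+ *
+ * and dma_spec->args[0] then selects one of the num_streams channels
+ * registered below.
+ */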
+
+static void xscd_dma_chan_init(struct xscd_device *xscd, int chan_id)
+{
+ struct xscd_dma_chan *chan = &xscd->chans[chan_id].dmachan;
+
+ chan->id = chan_id;
+ chan->iomem = xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
+ chan->xscd = xscd;
+
+ xscd->channels[chan->id] = chan;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
+ (unsigned long)chan);
+ init_waitqueue_head(&chan->wait);
+
+ chan->common.device = &xscd->dma_device;
+ list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);
+}
+
+/**
+ * xscd_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ */
+static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
+{
+ list_del(&chan->common.device_node);
+}
+
+/**
+ * xscd_dma_init - Initialize the SCD DMA engine
+ * @xscd: Pointer to the SCD device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xscd_dma_init(struct xscd_device *xscd)
+{
+ struct dma_device *ddev = &xscd->dma_device;
+ unsigned int chan_id;
+ int ret;
+
+ /* Initialize the DMA engine */
+ ddev->dev = xscd->dev;
+ dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
+
+ INIT_LIST_HEAD(&ddev->channels);
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
+ ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
+ ddev->device_tx_status = xscd_dma_tx_status;
+ ddev->device_issue_pending = xscd_dma_issue_pending;
+ ddev->device_terminate_all = xscd_dma_terminate_all;
+ ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;
+
+ for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
+ xscd_dma_chan_init(xscd, chan_id);
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(xscd->dev, "failed to register the dma device\n");
+ goto error;
+ }
+
+ ret = of_dma_controller_register(xscd->dev->of_node,
+ of_scdma_xilinx_xlate, xscd);
+ if (ret) {
+ dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
+ goto error_of_dma;
+ }
+
+ dev_info(xscd->dev, "Xilinx Scene Change DMA is initialized!\n");
+ return 0;
+
+error_of_dma:
+ dma_async_device_unregister(ddev);
+
+error:
+ for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
+ xscd_dma_chan_remove(xscd->channels[chan_id]);
+
+ return ret;
+}
+
+/**
+ * xscd_dma_cleanup - Clean up the SCD DMA engine
+ * @xscd: Pointer to the SCD device structure
+ *
+ * This function is the counterpart of xscd_dma_init() and cleans up the
+ * resources related to the DMA engine.
+ */
+void xscd_dma_cleanup(struct xscd_device *xscd)
+{
+ dma_async_device_unregister(&xscd->dma_device);
+ of_dma_controller_free(xscd->dev->of_node);
+}
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange.c b/drivers/media/platform/xilinx/xilinx-scenechange.c
new file mode 100644
index 000000000000..9135355934fe
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "xilinx-scenechange.h"
+
+#define XSCD_RESET_DEASSERT (0)
+#define XSCD_RESET_ASSERT (1)
+
+static irqreturn_t xscd_irq_handler(int irq, void *data)
+{
+ struct xscd_device *xscd = (struct xscd_device *)data;
+ u32 status;
+
+ status = xscd_read(xscd->iomem, XSCD_ISR_OFFSET);
+ if (!(status & XSCD_IE_AP_DONE))
+ return IRQ_NONE;
+
+ xscd_write(xscd->iomem, XSCD_ISR_OFFSET, XSCD_IE_AP_DONE);
+
+ if (xscd->memory_based)
+ xscd_dma_irq_handler(xscd);
+ else
+ xscd_chan_event_notify(&xscd->chans[0]);
+
+ return IRQ_HANDLED;
+}
+
+static int xscd_init_resources(struct xscd_device *xscd)
+{
+ struct platform_device *pdev = to_platform_device(xscd->dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xscd->iomem = devm_ioremap_resource(xscd->dev, res);
+ if (IS_ERR(xscd->iomem))
+ return PTR_ERR(xscd->iomem);
+
+ xscd->irq = platform_get_irq(pdev, 0);
+ if (xscd->irq < 0) {
+ dev_err(xscd->dev, "No valid irq found\n");
+ return xscd->irq;
+ }
+
+ xscd->clk = devm_clk_get(xscd->dev, NULL);
+ if (IS_ERR(xscd->clk))
+ return PTR_ERR(xscd->clk);
+
+ return clk_prepare_enable(xscd->clk);
+}
+
+static int xscd_parse_of(struct xscd_device *xscd)
+{
+ struct device *dev = xscd->dev;
+ struct device_node *node = xscd->dev->of_node;
+ int ret;
+
+ xscd->memory_based = of_property_read_bool(node, "xlnx,memorybased");
+ xscd->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xscd->rst_gpio)) {
+ if (PTR_ERR(xscd->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT\n");
+
+ return PTR_ERR(xscd->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,numstreams",
+ &xscd->num_streams);
+ if (ret < 0)
+ return ret;
+
+ if (!xscd->memory_based && xscd->num_streams != 1) {
+ dev_err(dev, "Stream-based mode only supports one stream\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xscd_probe(struct platform_device *pdev)
+{
+ struct xscd_device *xscd;
+ struct device_node *subdev_node;
+ unsigned int id;
+ int ret;
+
+ xscd = devm_kzalloc(&pdev->dev, sizeof(*xscd), GFP_KERNEL);
+ if (!xscd)
+ return -ENOMEM;
+
+ spin_lock_init(&xscd->lock);
+
+ xscd->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xscd);
+
+ ret = xscd_parse_of(xscd);
+ if (ret < 0)
+ return ret;
+
+ ret = xscd_init_resources(xscd);
+ if (ret < 0)
+ return ret;
+
+ /* Reset Scene Change Detection IP */
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_DEASSERT);
+
+ /* Initialize the channels. */
+ xscd->chans = devm_kcalloc(xscd->dev, xscd->num_streams,
+ sizeof(*xscd->chans), GFP_KERNEL);
+ if (!xscd->chans)
+ return -ENOMEM;
+
+ id = 0;
+ for_each_child_of_node(xscd->dev->of_node, subdev_node) {
+ if (id >= xscd->num_streams) {
+ dev_warn(&pdev->dev,
+ "Too many channels, limiting to %u\n",
+ xscd->num_streams);
+ of_node_put(subdev_node);
+ break;
+ }
+
+ ret = xscd_chan_init(xscd, id, subdev_node);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize channel %u\n",
+ id);
+ return ret;
+ }
+
+ id++;
+ }
+
+ /* Initialize the DMA engine. */
+ ret = xscd_dma_init(xscd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize the DMA\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(xscd->dev, xscd->irq, xscd_irq_handler,
+ IRQF_SHARED, dev_name(xscd->dev), xscd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ dev_info(xscd->dev, "scene change detect device found!\n");
+ return 0;
+}
+
+static int xscd_remove(struct platform_device *pdev)
+{
+ struct xscd_device *xscd = platform_get_drvdata(pdev);
+
+ xscd_dma_cleanup(xscd);
+ clk_disable_unprepare(xscd->clk);
+
+ return 0;
+}
+
+static const struct of_device_id xscd_of_id_table[] = {
+ { .compatible = "xlnx,v-scd" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xscd_of_id_table);
+
+static struct platform_driver xscd_driver = {
+ .driver = {
+ .name = "xilinx-scd",
+ .of_match_table = xscd_of_id_table,
+ },
+ .probe = xscd_probe,
+ .remove = xscd_remove,
+};
+
+module_platform_driver(xscd_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Scene Change Detection");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange.h b/drivers/media/platform/xilinx/xilinx-scenechange.h
new file mode 100644
index 000000000000..1573bf825217
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#ifndef _XILINX_SCENECHANGE_H_
+#define _XILINX_SCENECHANGE_H_
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/v4l2-subdev.h>
+
+struct clk;
+struct device;
+struct device_node;
+struct gpio_desc;
+
+/* Register/Descriptor Offsets */
+#define XSCD_CTRL_OFFSET 0x000
+#define XSCD_CTRL_AP_START BIT(0)
+#define XSCD_CTRL_AP_DONE BIT(1)
+#define XSCD_CTRL_AP_IDLE BIT(2)
+#define XSCD_CTRL_AP_READY BIT(3)
+#define XSCD_CTRL_AUTO_RESTART BIT(7)
+
+#define XSCD_GIE_OFFSET 0x004
+#define XSCD_GIE_EN BIT(0)
+
+#define XSCD_IE_OFFSET 0x008
+#define XSCD_IE_AP_DONE BIT(0)
+#define XSCD_IE_AP_READY BIT(1)
+
+#define XSCD_ISR_OFFSET 0x00c
+#define XSCD_WIDTH_OFFSET 0x010
+#define XSCD_HEIGHT_OFFSET 0x018
+#define XSCD_STRIDE_OFFSET 0x020
+#define XSCD_VID_FMT_OFFSET 0x028
+#define XSCD_VID_FMT_RGB 0
+#define XSCD_VID_FMT_YUV_444 1
+#define XSCD_VID_FMT_YUV_422 2
+#define XSCD_VID_FMT_YUV_420 3
+#define XSCD_VID_FMT_Y8 24
+#define XSCD_VID_FMT_Y10 25
+
+#define XSCD_SUBSAMPLE_OFFSET 0x030
+#define XSCD_SAD_OFFSET 0x038
+#define XSCD_ADDR_OFFSET 0x040
+#define XSCD_CHAN_OFFSET 0x100
+#define XSCD_CHAN_EN_OFFSET 0x780
+
+#define XSCD_MAX_CHANNELS 8
+
+/****************************** PROTOTYPES ************************************/
+
+struct xscd_device;
+
+/**
+ * struct xscd_dma_desc - SCD DMA software descriptor
+ * @luma_plane_addr: Luma plane buffer address
+ * @vsize: height (number of lines) of the luma frame
+ * @hsize: width of the luma frame
+ * @stride: stride of the luma frame
+ */
+struct xscd_dma_desc {
+ dma_addr_t luma_plane_addr;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+};
+
+/**
+ * struct xscd_dma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @sw: Software Descriptor
+ * @node: Node in the channel descriptor list
+ */
+struct xscd_dma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct xscd_dma_desc sw;
+ struct list_head node;
+};
+
+static inline struct xscd_dma_tx_descriptor *
+to_xscd_dma_tx_descriptor(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct xscd_dma_tx_descriptor, async_tx);
+}
+
+/**
+ * struct xscd_dma_chan - DMA Channel structure
+ * @xscd: SCD device
+ * @iomem: I/O memory address of the channel registers
+ * @id: scene change channel ID
+ * @common: DMA common channel
+ * @tasklet: Cleanup work after irq
+ * @lock: Protects pending_list, done_list, active_desc, enabled and running
+ * @pending_list: Descriptors waiting
+ * @done_list: Complete descriptors
+ * @active_desc: Currently active buffer being read/written to
+ * @enabled: Channel is enabled
+ * @running: Channel is running
+ * @wait: Wait queue to wait for the channel to stop
+ */
+struct xscd_dma_chan {
+ struct xscd_device *xscd;
+ void __iomem *iomem;
+ unsigned int id;
+
+ struct dma_chan common;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head done_list;
+ struct xscd_dma_tx_descriptor *active_desc;
+ unsigned int enabled;
+ unsigned int running;
+ wait_queue_head_t wait;
+};
+
+static inline struct xscd_dma_chan *to_xscd_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xscd_dma_chan, common);
+}
+
+/**
+ * struct xscd_chan - Video Stream structure
+ * @id: scene change channel ID
+ * @iomem: I/O memory address of the channel registers
+ * @xscd: SCD device
+ * @subdev: V4L2 subdevice
+ * @pads: media pads
+ * @format: active V4L2 media bus format for the pad
+ * @event: scene change event
+ * @dmachan: dma channel part of the scenechange stream
+ * @lock: lock to protect active stream count variable
+ */
+struct xscd_chan {
+ int id;
+ void __iomem *iomem;
+ struct xscd_device *xscd;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_event event;
+ struct xscd_dma_chan dmachan;
+
+ /* Lock to protect active stream count */
+ struct mutex lock;
+};
+
+static inline struct xscd_chan *to_xscd_chan(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscd_chan, subdev);
+}
+
+/**
+ * struct xscd_device - Xilinx Scene Change Detection device structure
+ * @dev: (OF) device
+ * @iomem: device I/O register space remapped to kernel virtual memory
+ * @rst_gpio: reset GPIO
+ * @clk: video core clock
+ * @irq: Device IRQ
+ * @memory_based: Flag to identify memory based mode
+ * @num_streams: Number of streams in the design
+ * @chans: video stream instances
+ * @dma_device: DMA device structure
+ * @channels: DMA channels
+ * @lock: Protects the running field
+ * @running: True when the SCD core is running
+ */
+struct xscd_device {
+ struct device *dev;
+ void __iomem *iomem;
+ struct gpio_desc *rst_gpio;
+ struct clk *clk;
+ int irq;
+
+ u8 memory_based;
+ int num_streams;
+
+ struct xscd_chan *chans;
+
+ struct dma_device dma_device;
+ struct xscd_dma_chan *channels[XSCD_MAX_CHANNELS];
+
+ /* This lock is to protect the running field */
+ spinlock_t lock;
+ u8 running;
+};
+
+/*
+ * Register related operations
+ */
+static inline u32 xscd_read(void __iomem *iomem, u32 addr)
+{
+ return ioread32(iomem + addr);
+}
+
+static inline void xscd_write(void __iomem *iomem, u32 addr, u32 value)
+{
+ iowrite32(value, iomem + addr);
+}
+
+static inline void xscd_clr(void __iomem *iomem, u32 addr, u32 clr)
+{
+ xscd_write(iomem, addr, xscd_read(iomem, addr) & ~clr);
+}
+
+static inline void xscd_set(void __iomem *iomem, u32 addr, u32 set)
+{
+ xscd_write(iomem, addr, xscd_read(iomem, addr) | set);
+}
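+
+/*
+ * Usage sketch: these helpers implement plain read-modify-write, e.g.
+ * setting the AP_START bit while preserving the rest of the control
+ * register,
+ *
+ * xscd_set(xscd->iomem, XSCD_CTRL_OFFSET, XSCD_CTRL_AP_START);
+ *
+ * They perform no locking of their own, so callers serialize access when
+ * several contexts touch the same register.
+ */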
+
+void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable);
+void xscd_dma_irq_handler(struct xscd_device *xscd);
+int xscd_dma_init(struct xscd_device *xscd);
+void xscd_dma_cleanup(struct xscd_device *xscd);
+
+void xscd_chan_event_notify(struct xscd_chan *chan);
+int xscd_chan_init(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node);
+#endif
diff --git a/drivers/media/platform/xilinx/xilinx-sdirxss.c b/drivers/media/platform/xilinx/xilinx-sdirxss.c
new file mode 100644
index 000000000000..1ee54ce28d22
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-sdirxss.c
@@ -0,0 +1,1855 @@
+/*
+ * Xilinx SDI Rx Subsystem
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Contacts: Vishal Sagar <vsagar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/media/xilinx-vip.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/v4l2-subdev.h>
+#include <linux/xilinx-sdirxss.h>
+#include <linux/xilinx-v4l2-controls.h>
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/*
+ * SDI Rx register map, bitmask and offsets
+ */
+#define XSDIRX_RST_CTRL_REG 0x00
+#define XSDIRX_MDL_CTRL_REG 0x04
+#define XSDIRX_GLBL_IER_REG 0x0C
+#define XSDIRX_ISR_REG 0x10
+#define XSDIRX_IER_REG 0x14
+#define XSDIRX_ST352_VALID_REG 0x18
+#define XSDIRX_ST352_DS1_REG 0x1C
+#define XSDIRX_ST352_DS3_REG 0x20
+#define XSDIRX_ST352_DS5_REG 0x24
+#define XSDIRX_ST352_DS7_REG 0x28
+#define XSDIRX_ST352_DS9_REG 0x2C
+#define XSDIRX_ST352_DS11_REG 0x30
+#define XSDIRX_ST352_DS13_REG 0x34
+#define XSDIRX_ST352_DS15_REG 0x38
+#define XSDIRX_VERSION_REG 0x3C
+#define XSDIRX_SS_CONFIG_REG 0x40
+#define XSDIRX_MODE_DET_STAT_REG 0x44
+#define XSDIRX_TS_DET_STAT_REG 0x48
+#define XSDIRX_EDH_STAT_REG 0x4C
+#define XSDIRX_EDH_ERRCNT_EN_REG 0x50
+#define XSDIRX_EDH_ERRCNT_REG 0x54
+#define XSDIRX_CRC_ERRCNT_REG 0x58
+#define XSDIRX_VID_LOCK_WINDOW_REG 0x5C
+#define XSDIRX_SB_RX_STS_REG 0x60
+
+#define XSDIRX_RST_CTRL_SS_EN_MASK BIT(0)
+#define XSDIRX_RST_CTRL_SRST_MASK BIT(1)
+#define XSDIRX_RST_CTRL_RST_CRC_ERRCNT_MASK BIT(2)
+#define XSDIRX_RST_CTRL_RST_EDH_ERRCNT_MASK BIT(3)
+#define XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK BIT(8)
+#define XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK BIT(9)
+
+#define XSDIRX_MDL_CTRL_FRM_EN_MASK BIT(4)
+#define XSDIRX_MDL_CTRL_MODE_DET_EN_MASK BIT(5)
+#define XSDIRX_MDL_CTRL_MODE_HD_EN_MASK BIT(8)
+#define XSDIRX_MDL_CTRL_MODE_SD_EN_MASK BIT(9)
+#define XSDIRX_MDL_CTRL_MODE_3G_EN_MASK BIT(10)
+#define XSDIRX_MDL_CTRL_MODE_6G_EN_MASK BIT(11)
+#define XSDIRX_MDL_CTRL_MODE_12GI_EN_MASK BIT(12)
+#define XSDIRX_MDL_CTRL_MODE_12GF_EN_MASK BIT(13)
+#define XSDIRX_MDL_CTRL_MODE_AUTO_DET_MASK GENMASK(13, 8)
+
+#define XSDIRX_MDL_CTRL_FORCED_MODE_OFFSET 16
+#define XSDIRX_MDL_CTRL_FORCED_MODE_MASK GENMASK(18, 16)
+
+#define XSDIRX_GLBL_INTR_EN_MASK BIT(0)
+
+#define XSDIRX_INTR_VIDLOCK_MASK BIT(0)
+#define XSDIRX_INTR_VIDUNLOCK_MASK BIT(1)
+#define XSDIRX_INTR_OVERFLOW_MASK BIT(9)
+#define XSDIRX_INTR_UNDERFLOW_MASK BIT(10)
+
+#define XSDIRX_INTR_ALL_MASK (XSDIRX_INTR_VIDLOCK_MASK |\
+ XSDIRX_INTR_VIDUNLOCK_MASK |\
+ XSDIRX_INTR_OVERFLOW_MASK |\
+ XSDIRX_INTR_UNDERFLOW_MASK)
+
+#define XSDIRX_ST352_VALID_DS1_MASK BIT(0)
+#define XSDIRX_ST352_VALID_DS3_MASK BIT(1)
+#define XSDIRX_ST352_VALID_DS5_MASK BIT(2)
+#define XSDIRX_ST352_VALID_DS7_MASK BIT(3)
+#define XSDIRX_ST352_VALID_DS9_MASK BIT(4)
+#define XSDIRX_ST352_VALID_DS11_MASK BIT(5)
+#define XSDIRX_ST352_VALID_DS13_MASK BIT(6)
+#define XSDIRX_ST352_VALID_DS15_MASK BIT(7)
+
+#define XSDIRX_MODE_DET_STAT_RX_MODE_MASK GENMASK(2, 0)
+#define XSDIRX_MODE_DET_STAT_MODE_LOCK_MASK BIT(3)
+#define XSDIRX_MODE_DET_STAT_ACT_STREAM_MASK GENMASK(6, 4)
+#define XSDIRX_MODE_DET_STAT_ACT_STREAM_OFFSET 4
+#define XSDIRX_MODE_DET_STAT_LVLB_3G_MASK BIT(7)
+
+#define XSDIRX_ACTIVE_STREAMS_1 0x0
+#define XSDIRX_ACTIVE_STREAMS_2 0x1
+#define XSDIRX_ACTIVE_STREAMS_4 0x2
+#define XSDIRX_ACTIVE_STREAMS_8 0x3
+#define XSDIRX_ACTIVE_STREAMS_16 0x4
+
+#define XSDIRX_TS_DET_STAT_LOCKED_MASK BIT(0)
+#define XSDIRX_TS_DET_STAT_SCAN_MASK BIT(1)
+#define XSDIRX_TS_DET_STAT_SCAN_OFFSET (1)
+#define XSDIRX_TS_DET_STAT_FAMILY_MASK GENMASK(7, 4)
+#define XSDIRX_TS_DET_STAT_FAMILY_OFFSET (4)
+#define XSDIRX_TS_DET_STAT_RATE_MASK GENMASK(11, 8)
+#define XSDIRX_TS_DET_STAT_RATE_OFFSET (8)
+
+#define XSDIRX_TS_DET_STAT_RATE_NONE 0x0
+#define XSDIRX_TS_DET_STAT_RATE_23_98HZ 0x2
+#define XSDIRX_TS_DET_STAT_RATE_24HZ 0x3
+#define XSDIRX_TS_DET_STAT_RATE_47_95HZ 0x4
+#define XSDIRX_TS_DET_STAT_RATE_25HZ 0x5
+#define XSDIRX_TS_DET_STAT_RATE_29_97HZ 0x6
+#define XSDIRX_TS_DET_STAT_RATE_30HZ 0x7
+#define XSDIRX_TS_DET_STAT_RATE_48HZ 0x8
+#define XSDIRX_TS_DET_STAT_RATE_50HZ 0x9
+#define XSDIRX_TS_DET_STAT_RATE_59_94HZ 0xA
+#define XSDIRX_TS_DET_STAT_RATE_60HZ 0xB
+
+#define XSDIRX_EDH_STAT_EDH_AP_MASK BIT(0)
+#define XSDIRX_EDH_STAT_EDH_FF_MASK BIT(1)
+#define XSDIRX_EDH_STAT_EDH_ANC_MASK BIT(2)
+#define XSDIRX_EDH_STAT_AP_FLAG_MASK GENMASK(8, 4)
+#define XSDIRX_EDH_STAT_FF_FLAG_MASK GENMASK(13, 9)
+#define XSDIRX_EDH_STAT_ANC_FLAG_MASK GENMASK(18, 14)
+#define XSDIRX_EDH_STAT_PKT_FLAG_MASK GENMASK(22, 19)
+
+#define XSDIRX_EDH_ERRCNT_COUNT_MASK GENMASK(15, 0)
+
+#define XSDIRX_CRC_ERRCNT_COUNT_MASK GENMASK(31, 16)
+#define XSDIRX_CRC_ERRCNT_DS_CRC_MASK GENMASK(15, 0)
+
+#define XSDIRX_VERSION_REV_MASK GENMASK(7, 0)
+#define XSDIRX_VERSION_PATCHID_MASK GENMASK(11, 8)
+#define XSDIRX_VERSION_VER_REV_MASK GENMASK(15, 12)
+#define XSDIRX_VERSION_VER_MIN_MASK GENMASK(23, 16)
+#define XSDIRX_VERSION_VER_MAJ_MASK GENMASK(31, 24)
+
+#define XSDIRX_SS_CONFIG_EDH_INCLUDED_MASK BIT(1)
+
+#define XSDIRX_STAT_SB_RX_TDATA_CHANGE_DONE_MASK BIT(0)
+#define XSDIRX_STAT_SB_RX_TDATA_CHANGE_FAIL_MASK BIT(1)
+#define XSDIRX_STAT_SB_RX_TDATA_GT_RESETDONE_MASK BIT(2)
+#define XSDIRX_STAT_SB_RX_TDATA_GT_BITRATE_MASK BIT(3)
+
+/* Number of media pads */
+#define XSDIRX_MEDIA_PADS (1)
+
+#define XSDIRX_DEFAULT_WIDTH (1920)
+#define XSDIRX_DEFAULT_HEIGHT (1080)
+
+#define XSDIRX_MAX_STR_LENGTH 16
+
+#define XSDIRXSS_SDI_STD_3G 0
+#define XSDIRXSS_SDI_STD_6G 1
+#define XSDIRXSS_SDI_STD_12G_8DS 2
+
+#define XSDIRX_DEFAULT_VIDEO_LOCK_WINDOW 0x3000
+
+#define XSDIRX_MODE_HD_MASK 0x0
+#define XSDIRX_MODE_SD_MASK 0x1
+#define XSDIRX_MODE_3G_MASK 0x2
+#define XSDIRX_MODE_6G_MASK 0x4
+#define XSDIRX_MODE_12GI_MASK 0x5
+#define XSDIRX_MODE_12GF_MASK 0x6
+
+/*
+ * Maximum number of events per file handle.
+ */
+#define XSDIRX_MAX_EVENTS (128)
+
+/* ST352 related macros */
+#define XST352_PAYLOAD_BYTE_MASK 0xFF
+#define XST352_PAYLOAD_BYTE1_SHIFT 0
+#define XST352_PAYLOAD_BYTE2_SHIFT 8
+#define XST352_PAYLOAD_BYTE3_SHIFT 16
+#define XST352_PAYLOAD_BYTE4_SHIFT 24
+
+#define XST352_BYTE1_ST292_1x720L_1_5G 0x84
+#define XST352_BYTE1_ST292_1x1080L_1_5G 0x85
+#define XST352_BYTE1_ST425_2008_750L_3GB 0x88
+#define XST352_BYTE1_ST425_2008_1125L_3GA 0x89
+#define XST352_BYTE1_ST372_DL_3GB 0x8A
+#define XST352_BYTE1_ST372_2x720L_3GB 0x8B
+#define XST352_BYTE1_ST372_2x1080L_3GB 0x8C
+#define XST352_BYTE1_ST2081_10_2160L_6G 0xC0
+#define XST352_BYTE1_ST2081_10_DL_2160L_6G 0xC2
+#define XST352_BYTE1_ST2082_10_2160L_12G 0xCE
+
+#define XST352_BYTE2_TS_TYPE_MASK BIT(15)
+#define XST352_BYTE2_TS_TYPE_OFFSET 15
+#define XST352_BYTE2_PIC_TYPE_MASK BIT(14)
+#define XST352_BYTE2_PIC_TYPE_OFFSET 14
+#define XST352_BYTE2_TS_PIC_TYPE_INTERLACED 0
+#define XST352_BYTE2_TS_PIC_TYPE_PROGRESSIVE 1
+
+#define XST352_BYTE2_FPS_MASK 0xF
+#define XST352_BYTE2_FPS_SHIFT 8
+#define XST352_BYTE2_FPS_24F 0x2
+#define XST352_BYTE2_FPS_24 0x3
+#define XST352_BYTE2_FPS_48F 0x4
+#define XST352_BYTE2_FPS_25 0x5
+#define XST352_BYTE2_FPS_30F 0x6
+#define XST352_BYTE2_FPS_30 0x7
+#define XST352_BYTE2_FPS_48 0x8
+#define XST352_BYTE2_FPS_50 0x9
+#define XST352_BYTE2_FPS_60F 0xA
+#define XST352_BYTE2_FPS_60 0xB
+/* Table 4 ST 2081-10:2015 */
+#define XST352_BYTE2_FPS_96 0xC
+#define XST352_BYTE2_FPS_100 0xD
+#define XST352_BYTE2_FPS_120 0xE
+#define XST352_BYTE2_FPS_120F 0xF
+
+#define XST352_BYTE3_ACT_LUMA_COUNT_MASK BIT(22)
+#define XST352_BYTE3_ACT_LUMA_COUNT_OFFSET 22
+
+#define XST352_BYTE3_COLOR_FORMAT_MASK GENMASK(19, 16)
+#define XST352_BYTE3_COLOR_FORMAT_OFFSET 16
+#define XST352_BYTE3_COLOR_FORMAT_422 0x0
+#define XST352_BYTE3_COLOR_FORMAT_420 0x3
+
+/**
+ * enum sdi_family_enc - SDI Transport Video Format Detected with Active Pixels
+ * @XSDIRX_SMPTE_ST_274: SMPTE ST 274 detected with AP 1920x1080
+ * @XSDIRX_SMPTE_ST_296: SMPTE ST 296 detected with AP 1280x720
+ * @XSDIRX_SMPTE_ST_2048_2: SMPTE ST 2048-2 detected with AP 2048x1080
+ * @XSDIRX_SMPTE_ST_295: SMPTE ST 295 detected with AP 1920x1080
+ * @XSDIRX_NTSC: NTSC encoding detected with AP 720x486
+ * @XSDIRX_PAL: PAL encoding detected with AP 720x576
+ * @XSDIRX_TS_UNKNOWN: Unknown SMPTE Transport family type
+ */
+enum sdi_family_enc {
+ XSDIRX_SMPTE_ST_274 = 0,
+ XSDIRX_SMPTE_ST_296 = 1,
+ XSDIRX_SMPTE_ST_2048_2 = 2,
+ XSDIRX_SMPTE_ST_295 = 3,
+ XSDIRX_NTSC = 8,
+ XSDIRX_PAL = 9,
+ XSDIRX_TS_UNKNOWN = 15
+};
+
+/**
+ * struct xsdirxss_core - Core configuration SDI Rx Subsystem device structure
+ * @dev: Platform structure
+ * @iomem: Base address of subsystem
+ * @irq: requested irq number
+ * @include_edh: EDH processor presence
+ * @mode: 3G/6G/12G mode
+ * @axi_clk: AXI-Lite interface clock
+ * @sdirx_clk: SDI Rx GT clock
+ * @vidout_clk: Video clock
+ */
+struct xsdirxss_core {
+ struct device *dev;
+ void __iomem *iomem;
+ int irq;
+ bool include_edh;
+ int mode;
+ struct clk *axi_clk;
+ struct clk *sdirx_clk;
+ struct clk *vidout_clk;
+};
+
+/**
+ * struct xsdirxss_state - SDI Rx Subsystem device structure
+ * @core: Core structure for the SDI Rx Subsystem
+ * @subdev: The v4l2 subdev structure
+ * @ctrl_handler: control handler
+ * @event: Holds the video unlock event
+ * @formats: Active V4L2 formats on each pad
+ * @default_format: default V4L2 media bus format
+ * @frame_interval: Captures the frame rate
+ * @vip_format: format information corresponding to the active format
+ * @pads: media pads
+ * @streaming: Flag for storing streaming state
+ * @vidlocked: Flag indicating SDI Rx has locked onto video stream
+ * @ts_is_interlaced: Flag indicating Transport Stream is interlaced.
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xsdirxss_state {
+ struct xsdirxss_core core;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_event event;
+ struct v4l2_mbus_framefmt formats[XSDIRX_MEDIA_PADS];
+ struct v4l2_mbus_framefmt default_format;
+ struct v4l2_fract frame_interval;
+ const struct xvip_video_format *vip_format;
+ struct media_pad pads[XSDIRX_MEDIA_PADS];
+ bool streaming;
+ bool vidlocked;
+ bool ts_is_interlaced;
+};
+
+static inline struct xsdirxss_state *
+to_xsdirxssstate(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xsdirxss_state, subdev);
+}
+
+/*
+ * Register related operations
+ */
+static inline u32 xsdirxss_read(struct xsdirxss_core *xsdirxss, u32 addr)
+{
+ return ioread32(xsdirxss->iomem + addr);
+}
+
+static inline void xsdirxss_write(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 value)
+{
+ iowrite32(value, xsdirxss->iomem + addr);
+}
+
+static inline void xsdirxss_clr(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 clr)
+{
+ xsdirxss_write(xsdirxss, addr, xsdirxss_read(xsdirxss, addr) & ~clr);
+}
+
+static inline void xsdirxss_set(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 set)
+{
+ xsdirxss_write(xsdirxss, addr, xsdirxss_read(xsdirxss, addr) | set);
+}
+
+static void xsdirx_core_disable(struct xsdirxss_core *core)
+{
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, XSDIRX_RST_CTRL_SS_EN_MASK);
+}
+
+static void xsdirx_core_enable(struct xsdirxss_core *core)
+{
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG, XSDIRX_RST_CTRL_SS_EN_MASK);
+}
+
+static int xsdirx_set_modedetect(struct xsdirxss_core *core, u16 mask)
+{
+ u32 i, val;
+
+ mask &= XSDIRX_DETECT_ALL_MODES;
+ if (!mask) {
+ dev_err(core->dev, "Invalid bit mask = 0x%08x\n", mask);
+ return -EINVAL;
+ }
+
+ dev_dbg(core->dev, "mask = 0x%x\n", mask);
+
+ val = xsdirxss_read(core, XSDIRX_MDL_CTRL_REG);
+ val &= ~(XSDIRX_MDL_CTRL_MODE_DET_EN_MASK);
+ val &= ~(XSDIRX_MDL_CTRL_MODE_AUTO_DET_MASK);
+ val &= ~(XSDIRX_MDL_CTRL_FORCED_MODE_MASK);
+
+ if (hweight16(mask) > 1) {
+ /* Multi mode detection as more than 1 bit set in mask */
+ dev_dbg(core->dev, "Detect multiple modes\n");
+ for (i = 0; i < XSDIRX_MODE_NUM_SUPPORTED; i++) {
+ switch (mask & (1 << i)) {
+ case BIT(XSDIRX_MODE_SD_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_SD_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_HD_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_HD_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_3G_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_3G_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_6G_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_6G_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_12GI_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_12GI_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_12GF_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_12GF_EN_MASK;
+ break;
+ }
+ }
+ val |= XSDIRX_MDL_CTRL_MODE_DET_EN_MASK;
+ } else {
+ /* Fixed Mode */
+ u32 forced_mode_mask = 0;
+
+ dev_dbg(core->dev, "Detect fixed mode\n");
+
+ /* Find offset of first bit set */
+ switch (__ffs(mask)) {
+ case XSDIRX_MODE_SD_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_SD_MASK;
+ break;
+ case XSDIRX_MODE_HD_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_HD_MASK;
+ break;
+ case XSDIRX_MODE_3G_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_3G_MASK;
+ break;
+ case XSDIRX_MODE_6G_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_6G_MASK;
+ break;
+ case XSDIRX_MODE_12GI_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_12GI_MASK;
+ break;
+ case XSDIRX_MODE_12GF_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_12GF_MASK;
+ break;
+ }
+ dev_dbg(core->dev, "Forced Mode Mask : 0x%x\n",
+ forced_mode_mask);
+ val |= forced_mode_mask << XSDIRX_MDL_CTRL_FORCED_MODE_OFFSET;
+ }
+
+ dev_dbg(core->dev, "Modes to be detected : sdi ctrl reg = 0x%08x\n",
+ val);
+ xsdirxss_write(core, XSDIRX_MDL_CTRL_REG, val);
+
+ return 0;
+}
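+
+/*
+ * Usage sketch (mode offsets as used above): passing a multi-bit mask such
+ * as
+ *
+ * xsdirx_set_modedetect(core, BIT(XSDIRX_MODE_HD_OFFSET) |
+ * BIT(XSDIRX_MODE_3G_OFFSET));
+ *
+ * enables auto-detection between HD and 3G only, while a single-bit mask
+ * programs the corresponding forced mode instead.
+ */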
+
+static void xsdirx_framer(struct xsdirxss_core *core, bool flag)
+{
+ if (flag)
+ xsdirxss_set(core, XSDIRX_MDL_CTRL_REG,
+ XSDIRX_MDL_CTRL_FRM_EN_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_MDL_CTRL_REG,
+ XSDIRX_MDL_CTRL_FRM_EN_MASK);
+}
+
+static void xsdirx_setedherrcnttrigger(struct xsdirxss_core *core, u32 enable)
+{
+ u32 val = enable & XSDIRX_EDH_ALLERR_MASK;
+
+ xsdirxss_write(core, XSDIRX_EDH_ERRCNT_EN_REG, val);
+}
+
+static void xsdirx_setvidlockwindow(struct xsdirxss_core *core, u32 val)
+{
+ /*
+ * The video lock window is the amount of time for which the
+ * mode and transport stream should be locked to get the
+ * video lock interrupt.
+ */
+ xsdirxss_write(core, XSDIRX_VID_LOCK_WINDOW_REG, val);
+}
+
+static void xsdirx_disableintr(struct xsdirxss_core *core, u32 mask)
+{
+ xsdirxss_clr(core, XSDIRX_IER_REG, mask);
+}
+
+static void xsdirx_enableintr(struct xsdirxss_core *core, u32 mask)
+{
+ xsdirxss_set(core, XSDIRX_IER_REG, mask);
+}
+
+static void xsdirx_globalintr(struct xsdirxss_core *core, bool flag)
+{
+ if (flag)
+ xsdirxss_set(core, XSDIRX_GLBL_IER_REG,
+ XSDIRX_GLBL_INTR_EN_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_GLBL_IER_REG,
+ XSDIRX_GLBL_INTR_EN_MASK);
+}
+
+static void xsdirx_clearintr(struct xsdirxss_core *core, u32 mask)
+{
+ xsdirxss_set(core, XSDIRX_ISR_REG, mask);
+}
+
+static void xsdirx_vid_bridge_control(struct xsdirxss_core *core, bool enable)
+{
+ if (enable)
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK);
+}
+
+static void xsdirx_axis4_bridge_control(struct xsdirxss_core *core, bool enable)
+{
+ if (enable)
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK);
+}
+
+static void xsdirx_streamflow_control(struct xsdirxss_core *core, bool enable)
+{
+ /* The SDI-to-native bridge is followed by the native-to-AXI4-Stream bridge */
+ if (enable) {
+ xsdirx_axis4_bridge_control(core, enable);
+ xsdirx_vid_bridge_control(core, enable);
+ } else {
+ xsdirx_vid_bridge_control(core, enable);
+ xsdirx_axis4_bridge_control(core, enable);
+ }
+}
+
+static void xsdirx_streamdowncb(struct xsdirxss_core *core)
+{
+ xsdirx_streamflow_control(core, false);
+}
+
+static void xsdirxss_get_framerate(struct v4l2_fract *frame_interval,
+ u32 framerate)
+{
+ switch (framerate) {
+ case XSDIRX_TS_DET_STAT_RATE_23_98HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 24000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_24HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 24000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_25HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 25000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_29_97HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 30000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_30HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 30000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_47_95HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 48000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_48HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 48000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_50HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 50000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_59_94HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 60000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_60HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 60000;
+ break;
+ default:
+ frame_interval->numerator = 1;
+ frame_interval->denominator = 1;
+ }
+}
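+
+/*
+ * Note: the fractional entries above are the usual NTSC-derived rates,
+ * e.g. numerator 1001 with denominator 24000 is 24000/1001, i.e. about
+ * 23.976 fps (the "23.98 Hz" case).
+ */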
+
+/**
+ * xsdirx_get_stream_properties - Get SDI Rx stream properties
+ * @state: pointer to driver state
+ *
+ * This function decodes the stream's ST352 payload (if available) to get
+ * stream properties like width, height, picture type (interlaced/progressive),
+ * etc.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirx_get_stream_properties(struct xsdirxss_state *state)
+{
+ struct xsdirxss_core *core = &state->core;
+ u32 mode, payload = 0, val, family, valid, tscan;
+ u8 byte1 = 0, active_luma = 0, pic_type = 0, framerate = 0;
+ u8 sampling = XST352_BYTE3_COLOR_FORMAT_422;
+ struct v4l2_mbus_framefmt *format = &state->formats[0];
+
+ mode = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ mode &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+
+ valid = xsdirxss_read(core, XSDIRX_ST352_VALID_REG);
+
+ if ((mode >= XSDIRX_MODE_3G_MASK) && !valid) {
+ dev_err(core->dev, "No valid ST352 payload present in 3G mode or above\n");
+ return -EINVAL;
+ }
+
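+ /*
+ * Informative sketch of the ST352 payload layout (SMPTE ST 352):
+ * byte 1 identifies the payload standard, byte 2 carries the picture
+ * rate and scan type, byte 3 the sampling structure. Only these three
+ * bytes are consumed here.
+ */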
+ val = xsdirxss_read(core, XSDIRX_TS_DET_STAT_REG);
+ if (valid & XSDIRX_ST352_VALID_DS1_MASK) {
+ payload = xsdirxss_read(core, XSDIRX_ST352_DS1_REG);
+ byte1 = (payload >> XST352_PAYLOAD_BYTE1_SHIFT) &
+ XST352_PAYLOAD_BYTE_MASK;
+ active_luma = (payload & XST352_BYTE3_ACT_LUMA_COUNT_MASK) >>
+ XST352_BYTE3_ACT_LUMA_COUNT_OFFSET;
+ pic_type = (payload & XST352_BYTE2_PIC_TYPE_MASK) >>
+ XST352_BYTE2_PIC_TYPE_OFFSET;
+ framerate = (payload >> XST352_BYTE2_FPS_SHIFT) &
+ XST352_BYTE2_FPS_MASK;
+ tscan = (payload & XST352_BYTE2_TS_TYPE_MASK) >>
+ XST352_BYTE2_TS_TYPE_OFFSET;
+ sampling = (payload & XST352_BYTE3_COLOR_FORMAT_MASK) >>
+ XST352_BYTE3_COLOR_FORMAT_OFFSET;
+ } else {
+ dev_dbg(core->dev, "No ST352 payload available : Mode = %d\n",
+ mode);
+ framerate = (val & XSDIRX_TS_DET_STAT_RATE_MASK) >>
+ XSDIRX_TS_DET_STAT_RATE_OFFSET;
+ tscan = (val & XSDIRX_TS_DET_STAT_SCAN_MASK) >>
+ XSDIRX_TS_DET_STAT_SCAN_OFFSET;
+ }
+
+ family = (val & XSDIRX_TS_DET_STAT_FAMILY_MASK) >>
+ XSDIRX_TS_DET_STAT_FAMILY_OFFSET;
+ state->ts_is_interlaced = !tscan;
+
+ dev_dbg(core->dev, "ts_is_interlaced = %d, family = %d\n",
+ state->ts_is_interlaced, family);
+
+ switch (mode) {
+ case XSDIRX_MODE_HD_MASK:
+ if (!valid) {
+ /* No payload obtained */
+ dev_dbg(core->dev, "frame rate : %d, tscan = %d\n",
+ framerate, tscan);
+ /*
+ * NOTE: A progressive segmented frame (pSF) will be
+ * reported incorrectly as interlaced since we rely
+ * on the IP's transport scan locked bit.
+ */
+ dev_warn(core->dev, "pSF will be incorrectly reported as Interlaced\n");
+
+ switch (framerate) {
+ case XSDIRX_TS_DET_STAT_RATE_23_98HZ:
+ case XSDIRX_TS_DET_STAT_RATE_24HZ:
+ case XSDIRX_TS_DET_STAT_RATE_25HZ:
+ case XSDIRX_TS_DET_STAT_RATE_29_97HZ:
+ case XSDIRX_TS_DET_STAT_RATE_30HZ:
+ if (family == XSDIRX_SMPTE_ST_296) {
+ format->width = 1280;
+ format->height = 720;
+ format->field = V4L2_FIELD_NONE;
+ } else if (family == XSDIRX_SMPTE_ST_2048_2) {
+ format->width = 2048;
+ format->height = 1080;
+ if (tscan)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field =
+ V4L2_FIELD_ALTERNATE;
+ } else {
+ format->width = 1920;
+ format->height = 1080;
+ if (tscan)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field =
+ V4L2_FIELD_ALTERNATE;
+ }
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_50HZ:
+ case XSDIRX_TS_DET_STAT_RATE_59_94HZ:
+ case XSDIRX_TS_DET_STAT_RATE_60HZ:
+ if (family == XSDIRX_SMPTE_ST_274) {
+ format->width = 1920;
+ format->height = 1080;
+ } else {
+ format->width = 1280;
+ format->height = 720;
+ }
+ format->field = V4L2_FIELD_NONE;
+ break;
+ default:
+ format->width = 1920;
+ format->height = 1080;
+ format->field = V4L2_FIELD_NONE;
+ }
+ } else {
+ dev_dbg(core->dev, "Got the payload\n");
+ switch (byte1) {
+ case XST352_BYTE1_ST292_1x720L_1_5G:
+ /* SMPTE ST 292-1 for 720 line payloads */
+ format->width = 1280;
+ format->height = 720;
+ break;
+ case XST352_BYTE1_ST292_1x1080L_1_5G:
+ /* SMPTE ST 292-1 for 1080 line payloads */
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown HD Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case XSDIRX_MODE_SD_MASK:
+ format->field = V4L2_FIELD_ALTERNATE;
+
+ switch (family) {
+ case XSDIRX_NTSC:
+ format->width = 720;
+ format->height = 486;
+ break;
+ case XSDIRX_PAL:
+ format->width = 720;
+ format->height = 576;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown SD Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_3G_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST425_2008_750L_3GB:
+ /* Sec 4.1.6.1 SMPTE 425-2008 */
+ case XST352_BYTE1_ST372_2x720L_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ format->width = 1280;
+ format->height = 720;
+ break;
+ case XST352_BYTE1_ST425_2008_1125L_3GA:
+ /* ST352 Table SMPTE 425-1 */
+ case XST352_BYTE1_ST372_DL_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ case XST352_BYTE1_ST372_2x1080L_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 3G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_6G_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST2081_10_DL_2160L_6G:
+ /* Dual link 6G */
+ case XST352_BYTE1_ST2081_10_2160L_6G:
+ /* Table 3 SMPTE ST 2081-10 */
+ format->height = 2160;
+ if (active_luma)
+ format->width = 4096;
+ else
+ format->width = 3840;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 6G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_12GI_MASK:
+ case XSDIRX_MODE_12GF_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST2082_10_2160L_12G:
+ /* Section 4.3.1 SMPTE ST 2082-10 */
+ format->height = 2160;
+ if (active_luma)
+ format->width = 4096;
+ else
+ format->width = 3840;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 12G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(core->dev, "Invalid Mode\n");
+ return -EINVAL;
+ }
+
+ if (valid) {
+ if (pic_type)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field = V4L2_FIELD_ALTERNATE;
+ }
+
+ if (format->field == V4L2_FIELD_ALTERNATE)
+ format->height = format->height / 2;
+
+ switch (sampling) {
+ case XST352_BYTE3_COLOR_FORMAT_420:
+ format->code = MEDIA_BUS_FMT_VYYUYY10_4X20;
+ break;
+ case XST352_BYTE3_COLOR_FORMAT_422:
+ format->code = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ default:
+ dev_err(core->dev, "Unsupported color format : %d\n", sampling);
+ return -EINVAL;
+ }
+
+ xsdirxss_get_framerate(&state->frame_interval, framerate);
+
+ dev_dbg(core->dev, "Stream width = %d height = %d Field = %d payload = 0x%08x ts = 0x%08x\n",
+ format->width, format->height, format->field, payload, val);
+ dev_dbg(core->dev, "frame rate numerator = %d denominator = %d\n",
+ state->frame_interval.numerator,
+ state->frame_interval.denominator);
+ dev_dbg(core->dev, "Stream code = 0x%x\n", format->code);
+ return 0;
+}
+
+/**
+ * xsdirxss_irq_handler - Interrupt handler for SDI Rx
+ * @irq: IRQ number
+ * @dev_id: Pointer to device state
+ *
+ * The SDI Rx interrupts are cleared by first setting and then clearing the bits
+ * in the interrupt clear register. The interrupt status register is read only.
+ *
+ * Return: IRQ_HANDLED after handling interrupts
+ */
+static irqreturn_t xsdirxss_irq_handler(int irq, void *dev_id)
+{
+ struct xsdirxss_state *state = (struct xsdirxss_state *)dev_id;
+ struct xsdirxss_core *core = &state->core;
+ u32 status;
+
+ status = xsdirxss_read(core, XSDIRX_ISR_REG);
+ dev_dbg(core->dev, "interrupt status = 0x%08x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XSDIRX_INTR_VIDLOCK_MASK) {
+ u32 val1, val2;
+
+ dev_dbg(core->dev, "video lock interrupt\n");
+ xsdirx_clearintr(core, XSDIRX_INTR_VIDLOCK_MASK);
+
+ val1 = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val2 = xsdirxss_read(core, XSDIRX_TS_DET_STAT_REG);
+
+ if ((val1 & XSDIRX_MODE_DET_STAT_MODE_LOCK_MASK) &&
+ (val2 & XSDIRX_TS_DET_STAT_LOCKED_MASK)) {
+ u32 mask = XSDIRX_RST_CTRL_RST_CRC_ERRCNT_MASK |
+ XSDIRX_RST_CTRL_RST_EDH_ERRCNT_MASK;
+
+ dev_dbg(core->dev, "mode & ts lock occurred\n");
+
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG, mask);
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, mask);
+
+ val1 = xsdirxss_read(core, XSDIRX_ST352_VALID_REG);
+ val2 = xsdirxss_read(core, XSDIRX_ST352_DS1_REG);
+
+ dev_dbg(core->dev, "valid st352 mask = 0x%08x\n", val1);
+ dev_dbg(core->dev, "st352 payload = 0x%08x\n", val2);
+
+ if (!xsdirx_get_stream_properties(state)) {
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_SOURCE_CHANGE;
+ state->event.u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION;
+ v4l2_subdev_notify_event(&state->subdev,
+ &state->event);
+
+ state->vidlocked = true;
+ } else {
+ dev_err(core->dev, "Unable to get stream properties!\n");
+ state->vidlocked = false;
+ }
+ } else {
+ dev_dbg(core->dev, "mode or transport stream not locked yet!\n");
+ state->vidlocked = false;
+ }
+ }
+
+ if (status & XSDIRX_INTR_VIDUNLOCK_MASK) {
+ dev_dbg(core->dev, "video unlock interrupt\n");
+ xsdirx_clearintr(core, XSDIRX_INTR_VIDUNLOCK_MASK);
+ xsdirx_streamdowncb(core);
+
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_XLNXSDIRX_VIDUNLOCK;
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+
+ state->vidlocked = false;
+ }
+
+ if (status & XSDIRX_INTR_UNDERFLOW_MASK) {
+ dev_dbg(core->dev, "Video in to AXI4 Stream core underflow interrupt\n");
+ xsdirx_clearintr(core, XSDIRX_INTR_UNDERFLOW_MASK);
+
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_XLNXSDIRX_UNDERFLOW;
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+
+ if (status & XSDIRX_INTR_OVERFLOW_MASK) {
+ dev_dbg(core->dev, "Video in to AXI4 Stream core overflow interrupt\n");
+ xsdirx_clearintr(core, XSDIRX_INTR_OVERFLOW_MASK);
+
+ memset(&state->event, 0, sizeof(state->event));
+ state->event.type = V4L2_EVENT_XLNXSDIRX_OVERFLOW;
+ v4l2_subdev_notify_event(&state->subdev, &state->event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xsdirxss_subscribe_event - Subscribe to SDI Rx video and error events
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 File Handle
+ * @sub: Subscribe event structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXSDIRX_VIDUNLOCK:
+ case V4L2_EVENT_XLNXSDIRX_UNDERFLOW:
+ case V4L2_EVENT_XLNXSDIRX_OVERFLOW:
+ ret = v4l2_event_subscribe(fh, sub, XSDIRX_MAX_EVENTS, NULL);
+ break;
+ case V4L2_EVENT_SOURCE_CHANGE:
+ ret = v4l2_src_change_event_subscribe(fh, sub);
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(core->dev, "Event subscribed : 0x%08x\n", sub->type);
+ return ret;
+}
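+
+/*
+ * Illustrative user-space sketch (not part of the driver): subscribing
+ * to the source change event on the subdev node; the device path is
+ * hypothetical.
+ *
+ * struct v4l2_event_subscription sub = {
+ * .type = V4L2_EVENT_SOURCE_CHANGE,
+ * };
+ * struct v4l2_event ev;
+ * int fd = open("/dev/v4l-subdev0", O_RDWR);
+ *
+ * ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ * ioctl(fd, VIDIOC_DQEVENT, &ev); (blocks until an event arrives)
+ */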
+
+/**
+ * xsdirxss_unsubscribe_event - Unsubscribe from the requested event
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 file handle
+ * @sub: pointer to Event unsubscription structure
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int xsdirxss_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ dev_dbg(core->dev, "Event unsubscribe : 0x%08x\n", sub->type);
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/**
+ * xsdirxss_s_ctrl - Set the Xilinx SDI Rx V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the Xilinx SDI Rx
+ * Subsystem.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xsdirxss_state *xsdirxss =
+ container_of(ctrl->handler,
+ struct xsdirxss_state, ctrl_handler);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ dev_dbg(core->dev, "set ctrl id = 0x%08x val = 0x%08x\n",
+ ctrl->id, ctrl->val);
+
+ if (xsdirxss->streaming) {
+ dev_err(core->dev, "Cannot set controls while streaming\n");
+ return -EINVAL;
+ }
+
+ xsdirx_core_disable(core);
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_SDIRX_FRAMER:
+ xsdirx_framer(core, ctrl->val);
+ break;
+ case V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW:
+ xsdirx_setvidlockwindow(core, ctrl->val);
+ break;
+ case V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE:
+ xsdirx_setedherrcnttrigger(core, ctrl->val);
+ break;
+ case V4L2_CID_XILINX_SDIRX_SEARCH_MODES:
+ if (ctrl->val) {
+ if (core->mode == XSDIRXSS_SDI_STD_3G) {
+ dev_dbg(core->dev, "Up to 3G supported\n");
+ ctrl->val &= ~(BIT(XSDIRX_MODE_6G_OFFSET) |
+ BIT(XSDIRX_MODE_12GI_OFFSET) |
+ BIT(XSDIRX_MODE_12GF_OFFSET));
+ }
+
+ if (core->mode == XSDIRXSS_SDI_STD_6G) {
+ dev_dbg(core->dev, "Up to 6G supported\n");
+ ctrl->val &= ~(BIT(XSDIRX_MODE_12GI_OFFSET) |
+ BIT(XSDIRX_MODE_12GF_OFFSET));
+ }
+
+ ret = xsdirx_set_modedetect(core, ctrl->val);
+ } else {
+ dev_err(core->dev, "Select at least one mode!\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_SS_EN_MASK);
+ return -EINVAL;
+ }
+ xsdirx_core_enable(core);
+ return ret;
+}
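+
+/*
+ * Illustrative sketch: limiting the search to HD and 3G from user space
+ * with the standard control ioctl. The BIT() positions mirror the
+ * driver's internal XSDIRX_MODE_*_OFFSET values.
+ *
+ * struct v4l2_control ctrl = {
+ * .id = V4L2_CID_XILINX_SDIRX_SEARCH_MODES,
+ * .value = BIT(XSDIRX_MODE_HD_OFFSET) |
+ * BIT(XSDIRX_MODE_3G_OFFSET),
+ * };
+ *
+ * ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ */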
+
+/**
+ * xsdirxss_g_volatile_ctrl - get the Xilinx SDI Rx controls
+ * @ctrl: Pointer to V4L2 control
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ u32 val;
+ struct xsdirxss_state *xsdirxss =
+ container_of(ctrl->handler,
+ struct xsdirxss_state, ctrl_handler);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_SDIRX_MODE_DETECT:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+
+ switch (val) {
+ case XSDIRX_MODE_SD_MASK:
+ ctrl->val = XSDIRX_MODE_SD_OFFSET;
+ break;
+ case XSDIRX_MODE_HD_MASK:
+ ctrl->val = XSDIRX_MODE_HD_OFFSET;
+ break;
+ case XSDIRX_MODE_3G_MASK:
+ ctrl->val = XSDIRX_MODE_3G_OFFSET;
+ break;
+ case XSDIRX_MODE_6G_MASK:
+ ctrl->val = XSDIRX_MODE_6G_OFFSET;
+ break;
+ case XSDIRX_MODE_12GI_MASK:
+ ctrl->val = XSDIRX_MODE_12GI_OFFSET;
+ break;
+ case XSDIRX_MODE_12GF_MASK:
+ ctrl->val = XSDIRX_MODE_12GF_OFFSET;
+ break;
+ }
+ break;
+ case V4L2_CID_XILINX_SDIRX_CRC:
+ ctrl->val = xsdirxss_read(core, XSDIRX_CRC_ERRCNT_REG);
+ xsdirxss_write(core, XSDIRX_CRC_ERRCNT_REG, 0xFFFF);
+ break;
+ case V4L2_CID_XILINX_SDIRX_EDH_ERRCNT:
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+ if (val == XSDIRX_MODE_SD_MASK) {
+ ctrl->val = xsdirxss_read(core, XSDIRX_EDH_ERRCNT_REG);
+ } else {
+ dev_dbg(core->dev, "%d - not in SD mode\n", ctrl->id);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_XILINX_SDIRX_EDH_STATUS:
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+ if (val == XSDIRX_MODE_SD_MASK) {
+ ctrl->val = xsdirxss_read(core, XSDIRX_EDH_STAT_REG);
+ } else {
+ dev_dbg(core->dev, "%d - not in SD mode\n", ctrl->id);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ ctrl->val = xsdirxss->ts_is_interlaced;
+ break;
+ case V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_ACT_STREAM_MASK;
+ val >>= XSDIRX_MODE_DET_STAT_ACT_STREAM_OFFSET;
+ ctrl->val = 1 << val;
+ break;
+ case V4L2_CID_XILINX_SDIRX_IS_3GB:
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Can't get values when video not locked!\n");
+ return -EINVAL;
+ }
+ val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ val &= XSDIRX_MODE_DET_STAT_LVLB_3G_MASK;
+ ctrl->val = !!val;
+ break;
+ default:
+ dev_err(core->dev, "Invalid control id 0x%x\n", ctrl->id);
+ return -EINVAL;
+ }
+ dev_dbg(core->dev, "Get ctrl id = 0x%08x val = 0x%08x\n",
+ ctrl->id, ctrl->val);
+ return 0;
+}
+
+/**
+ * xsdirxss_log_status - Logs the status of the SDI Rx Subsystem
+ * @sd: Pointer to V4L2 subdevice structure
+ *
+ * This function prints the current status of Xilinx SDI Rx Subsystem
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_log_status(struct v4l2_subdev *sd)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+ u32 data, i;
+
+ v4l2_info(sd, "***** SDI Rx subsystem reg dump start *****\n");
+ for (i = 0; i < 0x28; i++) {
+ data = xsdirxss_read(core, i * 4);
+ v4l2_info(sd, "offset 0x%08x data 0x%08x\n",
+ i * 4, data);
+ }
+ v4l2_info(sd, "***** SDI Rx subsystem reg dump end *****\n");
+ return 0;
+}
+
+static void xsdirxss_start_stream(struct xsdirxss_state *xsdirxss)
+{
+ xsdirx_streamflow_control(&xsdirxss->core, true);
+}
+
+static void xsdirxss_stop_stream(struct xsdirxss_state *xsdirxss)
+{
+ xsdirx_streamflow_control(&xsdirxss->core, false);
+}
+
+/**
+ * xsdirxss_g_frame_interval - Get the frame interval
+ * @sd: V4L2 Sub device
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to get the frame interval.
+ * The frame rate can be integral or fractional:
+ * integral, e.g. numerator = 1000, denominator = 24000 => 24 fps;
+ * fractional, e.g. numerator = 1001, denominator = 24000 => 23.98 fps.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Video not locked!\n");
+ return -EINVAL;
+ }
+
+ fi->interval = xsdirxss->frame_interval;
+
+ dev_dbg(core->dev, "frame rate numerator = %d denominator = %d\n",
+ xsdirxss->frame_interval.numerator,
+ xsdirxss->frame_interval.denominator);
+ return 0;
+}
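+
+/*
+ * Illustrative user-space sketch (not part of the driver): reading the
+ * detected interval through the subdev node.
+ *
+ * struct v4l2_subdev_frame_interval fi = { .pad = 0 };
+ *
+ * if (!ioctl(fd, VIDIOC_SUBDEV_G_FRAME_INTERVAL, &fi))
+ * printf("%u/%u\n", fi.interval.numerator,
+ * fi.interval.denominator);
+ */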
+
+/**
+ * xsdirxss_s_stream - Start or stop streaming
+ * @sd: V4L2 Sub device
+ * @enable: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * Xilinx SDI Rx Subsystem.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ if (enable) {
+ if (!xsdirxss->vidlocked) {
+ dev_dbg(core->dev, "Video is not locked\n");
+ return -EINVAL;
+ }
+ if (xsdirxss->streaming) {
+ dev_dbg(core->dev, "Already streaming\n");
+ return -EINVAL;
+ }
+
+ xsdirxss_start_stream(xsdirxss);
+ xsdirxss->streaming = true;
+ dev_dbg(core->dev, "Streaming started\n");
+ } else {
+ if (!xsdirxss->streaming) {
+ dev_dbg(core->dev, "Already stopped streaming\n");
+ return -EINVAL;
+ }
+
+ xsdirxss_stop_stream(xsdirxss);
+ xsdirxss->streaming = false;
+ dev_dbg(core->dev, "Streaming stopped\n");
+ }
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xsdirxss_get_pad_format(struct xsdirxss_state *xsdirxss,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xsdirxss->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xsdirxss->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * xsdirxss_get_format - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+ struct xsdirxss_core *core = &xsdirxss->core;
+
+ if (!xsdirxss->vidlocked) {
+ dev_err(core->dev, "Video not locked!\n");
+ return -EINVAL;
+ }
+
+ fmt->format = *__xsdirxss_get_pad_format(xsdirxss, cfg,
+ fmt->pad, fmt->which);
+
+ dev_dbg(core->dev, "Stream width = %d height = %d Field = %d\n",
+ fmt->format.width, fmt->format.height, fmt->format.field);
+
+ return 0;
+}
+
+/**
+ * xsdirxss_set_format - Set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ * Since the pad format is fixed in hardware, it can't be
+ * modified at run time.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *__format;
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+
+ dev_dbg(xsdirxss->core.dev,
+ "set width %d height %d code %d field %d colorspace %d\n",
+ fmt->format.width, fmt->format.height,
+ fmt->format.code, fmt->format.field,
+ fmt->format.colorspace);
+
+ __format = __xsdirxss_get_pad_format(xsdirxss, cfg,
+ fmt->pad, fmt->which);
+
+ /* Currently reset the code to the one fixed in hardware */
+ /* TODO: Add checks for width and height */
+ fmt->format.code = __format->code;
+
+ return 0;
+}
+
+/**
+ * xsdirxss_open - Called on v4l2_open()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_open(). It sets the default format for pad.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ *format = xsdirxss->default_format;
+
+ return 0;
+}
+
+/**
+ * xsdirxss_close - Called on v4l2_close()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_close().
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xsdirxss_media_ops = {
+ .link_validate = v4l2_subdev_link_validate
+};
+
+static const struct v4l2_ctrl_ops xsdirxss_ctrl_ops = {
+ .g_volatile_ctrl = xsdirxss_g_volatile_ctrl,
+ .s_ctrl = xsdirxss_s_ctrl
+};
+
+static struct v4l2_ctrl_config xsdirxss_edh_ctrls[] = {
+ {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE,
+ .name = "SDI Rx : EDH Error Count Enable",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = XSDIRX_EDH_ALLERR_MASK,
+ .def = 0,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_EDH_ERRCNT,
+ .name = "SDI Rx : EDH Error Count",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_EDH_STATUS,
+ .name = "SDI Rx : EDH Status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }
+};
+
+static struct v4l2_ctrl_config xsdirxss_ctrls[] = {
+ {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_FRAMER,
+ .name = "SDI Rx : Enable Framer",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = true,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW,
+ .name = "SDI Rx : Video Lock Window",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = XSDIRX_DEFAULT_VIDEO_LOCK_WINDOW,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_SEARCH_MODES,
+ .name = "SDI Rx : Mode Search Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = XSDIRX_DETECT_ALL_MODES,
+ .def = XSDIRX_DETECT_ALL_MODES,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_MODE_DETECT,
+ .name = "SDI Rx : Mode Detect Status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = XSDIRX_MODE_SD_OFFSET,
+ .max = XSDIRX_MODE_12GF_OFFSET,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_CRC,
+ .name = "SDI Rx : CRC Error status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED,
+ .name = "SDI Rx : TS is Interlaced",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .def = false,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS,
+ .name = "SDI Rx : Active Streams",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 16,
+ .def = 1,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &xsdirxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_SDIRX_IS_3GB,
+ .name = "SDI Rx : Is 3GB",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .def = false,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }
+};
+
+static const struct v4l2_subdev_core_ops xsdirxss_core_ops = {
+ .log_status = xsdirxss_log_status,
+ .subscribe_event = xsdirxss_subscribe_event,
+ .unsubscribe_event = xsdirxss_unsubscribe_event
+};
+
+static struct v4l2_subdev_video_ops xsdirxss_video_ops = {
+ .g_frame_interval = xsdirxss_g_frame_interval,
+ .s_stream = xsdirxss_s_stream
+};
+
+static struct v4l2_subdev_pad_ops xsdirxss_pad_ops = {
+ .get_fmt = xsdirxss_get_format,
+ .set_fmt = xsdirxss_set_format,
+};
+
+static struct v4l2_subdev_ops xsdirxss_ops = {
+ .core = &xsdirxss_core_ops,
+ .video = &xsdirxss_video_ops,
+ .pad = &xsdirxss_pad_ops
+};
+
+static const struct v4l2_subdev_internal_ops xsdirxss_internal_ops = {
+ .open = xsdirxss_open,
+ .close = xsdirxss_close
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xsdirxss_parse_of(struct xsdirxss_state *xsdirxss)
+{
+ struct device_node *node = xsdirxss->core.dev->of_node;
+ struct device_node *ports = NULL;
+ struct device_node *port = NULL;
+ unsigned int nports = 0;
+ struct xsdirxss_core *core = &xsdirxss->core;
+ int ret;
+ const char *sdi_std;
+
+ core->include_edh = of_property_read_bool(node, "xlnx,include-edh");
+ dev_dbg(core->dev, "EDH property = %s\n",
+ core->include_edh ? "Present" : "Absent");
+
+ ret = of_property_read_string(node, "xlnx,line-rate",
+ &sdi_std);
+ if (ret < 0) {
+ dev_err(core->dev, "xlnx,line-rate property not found\n");
+ return ret;
+ }
+
+ if (!strncmp(sdi_std, "12G_SDI_8DS", XSDIRX_MAX_STR_LENGTH)) {
+ core->mode = XSDIRXSS_SDI_STD_12G_8DS;
+ } else if (!strncmp(sdi_std, "6G_SDI", XSDIRX_MAX_STR_LENGTH)) {
+ core->mode = XSDIRXSS_SDI_STD_6G;
+ } else if (!strncmp(sdi_std, "3G_SDI", XSDIRX_MAX_STR_LENGTH)) {
+ core->mode = XSDIRXSS_SDI_STD_3G;
+ } else {
+ dev_err(core->dev, "Invalid Line Rate\n");
+ return -EINVAL;
+ }
+ dev_dbg(core->dev, "SDI Rx Line Rate = %s, mode = %d\n", sdi_std,
+ core->mode);
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ for_each_child_of_node(ports, port) {
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(core->dev, "invalid format in DT\n");
+ return PTR_ERR(format);
+ }
+
+ dev_dbg(core->dev, "vf_code = %d bpc = %d bpp = %d\n",
+ format->vf_code, format->width, format->bpp);
+
+ if (format->vf_code != XVIP_VF_YUV_422 &&
+ format->vf_code != XVIP_VF_YUV_420) {
+ dev_err(core->dev, "Incorrect UG934 video format set.\n");
+ return -EINVAL;
+ }
+ xsdirxss->vip_format = format;
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(core->dev, "No endpoint node found in port\n");
+ return -EINVAL;
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (nports != 1) {
+ dev_err(core->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ /* Register interrupt handler */
+ core->irq = irq_of_parse_and_map(node, 0);
+
+ ret = devm_request_irq(core->dev, core->irq, xsdirxss_irq_handler,
+ IRQF_SHARED, "xilinx-sdirxss", xsdirxss);
+ if (ret) {
+ dev_err(core->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xsdirxss_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xsdirxss_state *xsdirxss;
+ struct xsdirxss_core *core;
+ struct resource *res;
+ int ret;
+ unsigned int num_ctrls, num_edh_ctrls = 0, i;
+
+ xsdirxss = devm_kzalloc(&pdev->dev, sizeof(*xsdirxss), GFP_KERNEL);
+ if (!xsdirxss)
+ return -ENOMEM;
+
+ xsdirxss->core.dev = &pdev->dev;
+ core = &xsdirxss->core;
+
+ core->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(core->axi_clk)) {
+ ret = PTR_ERR(core->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ core->sdirx_clk = devm_clk_get(&pdev->dev, "sdi_rx_clk");
+ if (IS_ERR(core->sdirx_clk)) {
+ ret = PTR_ERR(core->sdirx_clk);
+ dev_err(&pdev->dev, "failed to get sdi_rx_clk (%d)\n", ret);
+ return ret;
+ }
+
+ core->vidout_clk = devm_clk_get(&pdev->dev, "video_out_clk");
+ if (IS_ERR(core->vidout_clk)) {
+ ret = PTR_ERR(core->vidout_clk);
+ dev_err(&pdev->dev, "failed to get video_out_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(core->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(core->sdirx_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable sdirx_clk (%d)\n", ret);
+ goto rx_clk_err;
+ }
+
+ ret = clk_prepare_enable(core->vidout_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable vidout_clk (%d)\n", ret);
+ goto vidout_clk_err;
+ }
+
+ ret = xsdirxss_parse_of(xsdirxss);
+ if (ret < 0)
+ goto clk_err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xsdirxss->core.iomem = devm_ioremap_resource(xsdirxss->core.dev, res);
+ if (IS_ERR(xsdirxss->core.iomem)) {
+ ret = PTR_ERR(xsdirxss->core.iomem);
+ goto clk_err;
+ }
+
+ /* Reset the core */
+ xsdirx_streamflow_control(core, false);
+ xsdirx_core_disable(core);
+ xsdirx_clearintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_enableintr(core, XSDIRX_INTR_ALL_MASK);
+ xsdirx_globalintr(core, true);
+ xsdirxss_write(core, XSDIRX_CRC_ERRCNT_REG, 0xFFFF);
+
+ /* Initialize V4L2 subdevice and media entity */
+ xsdirxss->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Initialize the default format */
+ xsdirxss->default_format.code = xsdirxss->vip_format->code;
+ xsdirxss->default_format.field = V4L2_FIELD_NONE;
+ xsdirxss->default_format.colorspace = V4L2_COLORSPACE_DEFAULT;
+ xsdirxss->default_format.width = XSDIRX_DEFAULT_WIDTH;
+ xsdirxss->default_format.height = XSDIRX_DEFAULT_HEIGHT;
+
+ xsdirxss->formats[0] = xsdirxss->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xsdirxss->subdev;
+ v4l2_subdev_init(subdev, &xsdirxss_ops);
+
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xsdirxss_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ subdev->entity.ops = &xsdirxss_media_ops;
+
+ v4l2_set_subdevdata(subdev, xsdirxss);
+
+ ret = media_entity_pads_init(&subdev->entity, 1, xsdirxss->pads);
+ if (ret < 0)
+ goto error;
+
+ /* Initialise and register the controls */
+ num_ctrls = ARRAY_SIZE(xsdirxss_ctrls);
+
+ if (xsdirxss->core.include_edh)
+ num_edh_ctrls = ARRAY_SIZE(xsdirxss_edh_ctrls);
+
+ v4l2_ctrl_handler_init(&xsdirxss->ctrl_handler,
+ (num_ctrls + num_edh_ctrls));
+
+ for (i = 0; i < num_ctrls; i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(xsdirxss->core.dev, "%d %s ctrl = 0x%x\n",
+ i, xsdirxss_ctrls[i].name, xsdirxss_ctrls[i].id);
+
+ ctrl = v4l2_ctrl_new_custom(&xsdirxss->ctrl_handler,
+ &xsdirxss_ctrls[i], NULL);
+ if (!ctrl) {
+ dev_dbg(xsdirxss->core.dev, "Failed to add %s ctrl\n",
+ xsdirxss_ctrls[i].name);
+ ret = xsdirxss->ctrl_handler.error;
+ goto error;
+ }
+ }
+
+ if (xsdirxss->core.include_edh) {
+ for (i = 0; i < num_edh_ctrls; i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(xsdirxss->core.dev, "%d %s ctrl = 0x%x\n",
+ i, xsdirxss_edh_ctrls[i].name,
+ xsdirxss_edh_ctrls[i].id);
+
+ ctrl = v4l2_ctrl_new_custom(&xsdirxss->ctrl_handler,
+ &xsdirxss_edh_ctrls[i],
+ NULL);
+ if (!ctrl) {
+ dev_dbg(xsdirxss->core.dev, "Failed to add %s ctrl\n",
+ xsdirxss_edh_ctrls[i].name);
+ ret = xsdirxss->ctrl_handler.error;
+ goto error;
+ }
+ }
+ } else {
+ dev_dbg(xsdirxss->core.dev, "Not registering the EDH controls as EDH is disabled in IP\n");
+ }
+
+ if (xsdirxss->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xsdirxss->ctrl_handler.error;
+ goto error;
+ }
+
+ subdev->ctrl_handler = &xsdirxss->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&xsdirxss->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xsdirxss);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ xsdirxss->streaming = false;
+
+ dev_info(xsdirxss->core.dev, "Xilinx SDI Rx Subsystem device found!\n");
+
+ xsdirx_core_enable(core);
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(&xsdirxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+clk_err:
+ clk_disable_unprepare(core->vidout_clk);
+vidout_clk_err:
+ clk_disable_unprepare(core->sdirx_clk);
+rx_clk_err:
+ clk_disable_unprepare(core->axi_clk);
+ return ret;
+}
+
+static int xsdirxss_remove(struct platform_device *pdev)
+{
+ struct xsdirxss_state *xsdirxss = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xsdirxss->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xsdirxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xsdirxss->core.vidout_clk);
+ clk_disable_unprepare(xsdirxss->core.sdirx_clk);
+ clk_disable_unprepare(xsdirxss->core.axi_clk);
+ return 0;
+}
+
+static const struct of_device_id xsdirxss_of_id_table[] = {
+ { .compatible = "xlnx,v-smpte-uhdsdi-rx-ss" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xsdirxss_of_id_table);
+
+static struct platform_driver xsdirxss_driver = {
+ .driver = {
+ .name = "xilinx-sdirxss",
+ .of_match_table = xsdirxss_of_id_table,
+ },
+ .probe = xsdirxss_probe,
+ .remove = xsdirxss_remove,
+};
+
+module_platform_driver(xsdirxss_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vsagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx SDI Rx Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-switch.c b/drivers/media/platform/xilinx/xilinx-switch.c
new file mode 100644
index 000000000000..b0052a76c65d
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-switch.c
@@ -0,0 +1,460 @@
+/*
+ * Xilinx Video Switch
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XSW_CORE_CH_CTRL 0x0100
+#define XSW_CORE_CH_CTRL_FORCE (1 << 3)
+
+#define XSW_SWITCH_STATUS 0x0104
+
+/**
+ * struct xswitch_device - Xilinx Video Switch device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @nsinks: number of sink pads (2 to 8)
+ * @nsources: number of source pads (1 to 8)
+ * @routing: sink pad connected to each source pad (-1 if none)
+ * @formats: active V4L2 media bus formats on sink pads
+ */
+struct xswitch_device {
+ struct xvip_device xvip;
+
+ struct media_pad *pads;
+ unsigned int nsinks;
+ unsigned int nsources;
+
+ int routing[8];
+
+ struct v4l2_mbus_framefmt *formats;
+};
+
+static inline struct xswitch_device *to_xsw(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xswitch_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xsw_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ unsigned int unused_input;
+ unsigned int i;
+ u32 routing;
+
+ if (!enable) {
+ xvip_stop(&xsw->xvip);
+ return 0;
+ }
+
+ /*
+ * All outputs must be routed to an input. When fewer than 8 inputs
+ * are synthesized we can use input 7 for that purpose. Otherwise
+ * find an unused input to connect to unused outputs.
+ */
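+ /*
+ * Worked example: with 8 sinks, 4 sources and routing
+ * { 0, 1, -1, 2 }, mask = 0xff & ~(BIT(0) | BIT(1) | BIT(2))
+ * = 0xf8, so unused_input = ffs(0xf8) - 1 = 3.
+ */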
+ if (xsw->nsinks == 8) {
+ u32 mask;
+
+ for (i = 0, mask = 0xff; i < xsw->nsources; ++i) {
+ if (xsw->routing[i] != -1)
+ mask &= ~BIT(xsw->routing[i]);
+ }
+
+ /*
+ * If all inputs are used all outputs are also used. We don't
+ * need an unused input in that case, use a zero value.
+ */
+ unused_input = mask ? ffs(mask) - 1 : 0;
+ } else {
+ unused_input = 7;
+ }
+
+ /* Configure routing. */
+ for (i = 0, routing = 0; i < xsw->nsources; ++i) {
+ unsigned int route;
+
+ route = xsw->routing[i] == -1 ? unused_input : xsw->routing[i];
+ routing |= (XSW_CORE_CH_CTRL_FORCE | route)
+ << (i * 4);
+ }
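+
+ /*
+ * Each source occupies a 4-bit field in the control word: bit 3
+ * is the FORCE flag and bits 2:0 select the input. E.g. routing
+ * source 1 from input 2 packs (0x8 | 0x2) << 4 = 0xa0.
+ */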
+
+ xvip_write(&xsw->xvip, XSW_CORE_CH_CTRL, routing);
+
+ xvip_write(&xsw->xvip, XVIP_CTRL_CONTROL,
+ (((1 << xsw->nsources) - 1) << 4) |
+ XVIP_CTRL_CONTROL_SW_ENABLE);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+xsw_get_pad_format(struct xswitch_device *xsw,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xsw->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xsw->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xsw_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ int pad = fmt->pad;
+
+ if (pad >= xsw->nsinks) {
+ pad = xsw->routing[pad - xsw->nsinks];
+ if (pad < 0) {
+ memset(&fmt->format, 0, sizeof(fmt->format));
+ return 0;
+ }
+ }
+
+ fmt->format = *xsw_get_pad_format(xsw, cfg, pad, fmt->which);
+
+ return 0;
+}
+
+static int xsw_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* The source pad format is always identical to the sink pad format and
+ * can't be modified.
+ */
+ if (fmt->pad >= xsw->nsinks)
+ return xsw_get_format(subdev, cfg, fmt);
+
+ format = xsw_get_pad_format(xsw, cfg, fmt->pad, fmt->which);
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xsw_get_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ unsigned int i;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ for (i = 0; i < min(xsw->nsources, route->num_routes); ++i) {
+ route->routes[i].sink = xsw->routing[i];
+ route->routes[i].source = i;
+ }
+
+ route->num_routes = xsw->nsources;
+
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ return 0;
+}
+
+static int xsw_set_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (subdev->entity.stream_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ for (i = 0; i < xsw->nsources; ++i)
+ xsw->routing[i] = -1;
+
+ for (i = 0; i < route->num_routes; ++i)
+ xsw->routing[route->routes[i].source - xsw->nsinks] =
+ route->routes[i].sink;
+
+done:
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+ return ret;
+}
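+
+/*
+ * Example: with 2 sinks (pads 0-1) and 2 sources (pads 2-3), a request
+ * holding routes { .sink = 1, .source = 2 } and { .sink = 0, .source = 3 }
+ * yields routing[0] = 1 and routing[1] = 0; source pads are numbered
+ * after the sinks, hence the "- xsw->nsinks" translation.
+ */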
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/**
+ * xsw_init_formats - Initialize formats on all pads
+ * @subdev: V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ *
+ * The function sets the default format on every sink pad. Source pads
+ * don't store a separate format; they report the format of the sink pad
+ * they are routed from.
+ */
+static void xsw_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xswitch_device *xsw = to_xsw(subdev);
+ struct v4l2_subdev_format format;
+ unsigned int i;
+
+ for (i = 0; i < xsw->nsinks; ++i) {
+ memset(&format, 0, sizeof(format));
+
+ format.pad = i;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.width = 1920;
+ format.format.height = 1080;
+
+ xsw_set_format(subdev, fh ? fh->pad : NULL, &format);
+ }
+}
+
+static int xsw_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ xsw_init_formats(subdev, fh);
+
+ return 0;
+}
+
+static int xsw_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xsw_video_ops = {
+ .s_stream = xsw_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xsw_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xsw_get_format,
+ .set_fmt = xsw_set_format,
+ .get_routing = xsw_get_routing,
+ .set_routing = xsw_set_routing,
+};
+
+static struct v4l2_subdev_ops xsw_ops = {
+ .video = &xsw_video_ops,
+ .pad = &xsw_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xsw_internal_ops = {
+ .open = xsw_open,
+ .close = xsw_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static bool xsw_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ struct xswitch_device *xsw = container_of(entity, struct xswitch_device,
+ xvip.subdev.entity);
+ unsigned int sink0, sink1;
+
+ /* Two sinks are never connected together. */
+ if (pad0 < xsw->nsinks && pad1 < xsw->nsinks)
+ return false;
+
+ sink0 = pad0 < xsw->nsinks ? pad0 : xsw->routing[pad0 - xsw->nsinks];
+ sink1 = pad1 < xsw->nsinks ? pad1 : xsw->routing[pad1 - xsw->nsinks];
+
+ return sink0 == sink1;
+}
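+
+/*
+ * Example: with 8 sinks, pads 8 and up are sources, so
+ * xsw_has_route(1, 9) is true iff routing[9 - 8] == 1, i.e. source
+ * pad 9 is currently fed from sink pad 1.
+ */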
+
+static const struct media_entity_operations xsw_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = xsw_has_route,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xsw_parse_of(struct xswitch_device *xsw)
+{
+ struct device_node *node = xsw->xvip.dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(node, "#xlnx,inputs", &xsw->nsinks);
+ if (ret < 0) {
+ dev_err(xsw->xvip.dev, "missing or invalid #xlnx,%s property\n",
+ "inputs");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,outputs", &xsw->nsources);
+ if (ret < 0) {
+ dev_err(xsw->xvip.dev, "missing or invalid #xlnx,%s property\n",
+ "outputs");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xsw_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xswitch_device *xsw;
+ unsigned int npads;
+ unsigned int i;
+ int ret;
+
+ xsw = devm_kzalloc(&pdev->dev, sizeof(*xsw), GFP_KERNEL);
+ if (!xsw)
+ return -ENOMEM;
+
+ xsw->xvip.dev = &pdev->dev;
+
+ ret = xsw_parse_of(xsw);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xsw->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
+ * number of pads.
+ */
+ npads = xsw->nsinks + xsw->nsources;
+ xsw->pads = devm_kzalloc(&pdev->dev, npads * sizeof(*xsw->pads),
+ GFP_KERNEL);
+ if (!xsw->pads) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < xsw->nsinks; ++i)
+ xsw->pads[i].flags = MEDIA_PAD_FL_SINK;
+ for (; i < npads; ++i)
+ xsw->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ xsw->formats = devm_kzalloc(&pdev->dev,
+ xsw->nsinks * sizeof(*xsw->formats),
+ GFP_KERNEL);
+ if (!xsw->formats) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < xsw->nsources; ++i)
+ xsw->routing[i] = i < xsw->nsinks ? i : -1;
+
+ subdev = &xsw->xvip.subdev;
+ v4l2_subdev_init(subdev, &xsw_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xsw_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xsw);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xsw_media_ops;
+
+ xsw_init_formats(subdev, NULL);
+
+ ret = media_entity_pads_init(&subdev->entity, npads, xsw->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xsw);
+
+ xvip_print_version(&xsw->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xsw->xvip);
+ return ret;
+}
+
+static int xsw_remove(struct platform_device *pdev)
+{
+ struct xswitch_device *xsw = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xsw->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xsw->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xsw_of_id_table[] = {
+ { .compatible = "xlnx,v-switch-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xsw_of_id_table);
+
+static struct platform_driver xsw_driver = {
+ .driver = {
+ .name = "xilinx-switch",
+ .of_match_table = xsw_of_id_table,
+ },
+ .probe = xsw_probe,
+ .remove = xsw_remove,
+};
+
+module_platform_driver(xsw_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Switch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index ed01bedb5db6..f840bc098d9e 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -20,6 +20,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
+#include "xilinx-hls-common.h"
#include "xilinx-vip.h"
#include "xilinx-vtc.h"
@@ -58,6 +59,36 @@
#define XTPG_BAYER_PHASE_BGGR 3
#define XTPG_BAYER_PHASE_OFF 4
+/*
+ * TPG v7 is a completely redesigned IP built with Vivado HLS and has a
+ * different AXI4-Lite interface.
+ */
+#define XTPG_HLS_BG_PATTERN 0x0020
+#define XTPG_HLS_FG_PATTERN 0x0028
+#define XTPG_HLS_FG_PATTERN_CROSS_HAIR (1 << 1)
+#define XTPG_HLS_MASK_ID 0x0030
+#define XTPG_HLS_MOTION_SPEED 0x0038
+#define XTPG_HLS_COLOR_FORMAT 0x0040
+#define XTPG_HLS_COLOR_FORMAT_RGB 0
+#define XTPG_HLS_COLOR_FORMAT_YUV_444 1
+#define XTPG_HLS_COLOR_FORMAT_YUV_422 2
+#define XTPG_HLS_COLOR_FORMAT_YUV_420 3
+#define XTPG_HLS_CROSS_HAIR_HOR 0x0048
+#define XTPG_HLS_CROSS_HAIR_VER 0x0050
+#define XTPG_HLS_ZPLATE_HOR_CNTL_START 0x0058
+#define XTPG_HLS_ZPLATE_HOR_CNTL_DELTA 0x0060
+#define XTPG_HLS_ZPLATE_VER_CNTL_START 0x0068
+#define XTPG_HLS_ZPLATE_VER_CNTL_DELTA 0x0070
+#define XTPG_HLS_BOX_SIZE 0x0078
+#define XTPG_HLS_BOX_COLOR_RED_CB 0x0080
+#define XTPG_HLS_BOX_COLOR_GREEN_CR 0x0088
+#define XTPG_HLS_BOX_COLOR_BLUE_Y 0x0090
+#define XTPG_HLS_ENABLE_INPUT 0x0098
+#define XTPG_HLS_USE_INPUT_VID_STREAM (1 << 0)
+#define XTPG_HLS_PASS_THRU_START_X 0x00a0
+#define XTPG_HLS_PASS_THRU_START_Y 0x00a8
+#define XTPG_HLS_PASS_THRU_END_X 0x00b0
+#define XTPG_HLS_PASS_THRU_END_Y 0x00b8
+
/*
* The minimum blanking value is one clock cycle for the front porch, one clock
* cycle for the sync pulse and one clock cycle for the back porch.
@@ -67,6 +98,15 @@
#define XTPG_MIN_VBLANK 3
#define XTPG_MAX_VBLANK (XVTC_MAX_VSIZE - XVIP_MIN_HEIGHT)
+#define XTPG_MIN_WIDTH (64)
+#define XTPG_MIN_HEIGHT (64)
+#define XTPG_MAX_WIDTH (10328)
+#define XTPG_MAX_HEIGHT (7760)
+
+#define XTPG_MIN_PPC 1
+
+#define XTPG_MIN_FRM_INT 1
+
/**
* struct xtpg_device - Xilinx Test Pattern Generator device structure
* @xvip: Xilinx Video IP device
@@ -82,8 +122,15 @@
* @vblank: vertical blanking control
* @pattern: test pattern control
* @streaming: is the video stream active
+ * @is_hls: whether the IP core is HLS based
* @vtc: video timing controller
* @vtmux_gpio: video timing mux GPIO
+ * @rst_gpio: reset IP core GPIO
+ * @max_width: Maximum width supported by this instance
+ * @max_height: Maximum height supported by this instance
+ * @fi_d: frame interval denominator
+ * @fi_n: frame interval numerator
+ * @ppc: Pixels per clock control
*/
struct xtpg_device {
struct xvip_device xvip;
@@ -102,9 +149,17 @@ struct xtpg_device {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *pattern;
bool streaming;
+ bool is_hls;
struct xvtc_device *vtc;
struct gpio_desc *vtmux_gpio;
+ struct gpio_desc *rst_gpio;
+
+ u32 max_width;
+ u32 max_height;
+ u32 fi_d;
+ u32 fi_n;
+ u32 ppc;
};
static inline struct xtpg_device *to_tpg(struct v4l2_subdev *subdev)
@@ -128,6 +183,32 @@ static u32 xtpg_get_bayer_phase(unsigned int code)
}
}
+static void xtpg_config_vtc(struct xtpg_device *xtpg, int width, int height)
+{
+ struct xvtc_config config = {
+ .hblank_start = width / xtpg->ppc,
+ .hsync_start = width / xtpg->ppc + 1,
+ .vblank_start = height,
+ .vsync_start = height + 1,
+ .fps = xtpg->fi_d / xtpg->fi_n,
+ };
+ unsigned int htotal;
+ unsigned int vtotal;
+
+ htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
+ (v4l2_ctrl_g_ctrl(xtpg->hblank) + width) / xtpg->ppc);
+ vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
+ v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
+
+ config.hsync_end = htotal - 1;
+ config.hsize = htotal;
+ config.vsync_end = vtotal - 1;
+ config.vsize = vtotal;
+
+ xvtc_generator_start(xtpg->vtc, &config);
+}
+
static void __xtpg_update_pattern_control(struct xtpg_device *xtpg,
bool passthrough, bool pattern)
{
@@ -164,6 +245,33 @@ static void xtpg_update_pattern_control(struct xtpg_device *xtpg,
* V4L2 Subdevice Video Operations
*/
+static int xtpg_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+
+ fi->interval.numerator = xtpg->fi_n;
+ fi->interval.denominator = xtpg->fi_d;
+
+ return 0;
+}
+
+static int xtpg_s_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct xtpg_device *xtpg = to_tpg(subdev);
+
+ if (!fi->interval.numerator || !fi->interval.denominator) {
+ xtpg->fi_n = XTPG_MIN_FRM_INT;
+ xtpg->fi_d = XTPG_MIN_FRM_INT;
+ } else {
+ xtpg->fi_n = fi->interval.numerator;
+ xtpg->fi_d = fi->interval.denominator;
+ }
+
+ return 0;
+}
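+
+/*
+ * Note: xtpg_config_vtc() computes fps as fi_d / fi_n with integer
+ * division, so an interval of 1/60 gives 60 fps while 1001/60000
+ * truncates to 59 fps.
+ */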
+
static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct xtpg_device *xtpg = to_tpg(subdev);
@@ -173,7 +281,20 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
u32 bayer_phase;
if (!enable) {
- xvip_stop(&xtpg->xvip);
+ if (!xtpg->is_hls) {
+ xvip_stop(&xtpg->xvip);
+ } else {
+ /*
+ * There is a known issue in TPG v7.0: on a
+ * resolution change it doesn't generate the pattern
+ * correctly, i.e. some horizontal/vertical offset
+ * is added. As a workaround, issue a reset on stop.
+ */
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x1);
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x0);
+ v4l2_ctrl_handler_setup(&xtpg->ctrl_handler);
+ }
+
if (xtpg->vtc)
xvtc_generator_stop(xtpg->vtc);
@@ -182,31 +303,36 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
- xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
-
- if (xtpg->vtc) {
- struct xvtc_config config = {
- .hblank_start = width,
- .hsync_start = width + 1,
- .vblank_start = height,
- .vsync_start = height + 1,
- };
- unsigned int htotal;
- unsigned int vtotal;
-
- htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
- v4l2_ctrl_g_ctrl(xtpg->hblank) + width);
- vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
- v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
-
- config.hsync_end = htotal - 1;
- config.hsize = htotal;
- config.vsync_end = vtotal - 1;
- config.vsize = vtotal;
-
- xvtc_generator_start(xtpg->vtc, &config);
+ if (xtpg->is_hls) {
+ u32 fmt = 0;
+
+ switch (xtpg->formats[0].code) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ fmt = XTPG_HLS_COLOR_FORMAT_RGB;
+ break;
+ }
+ xvip_write(&xtpg->xvip, XTPG_HLS_COLOR_FORMAT, fmt);
+ xvip_write(&xtpg->xvip, XHLS_REG_COLS, width);
+ xvip_write(&xtpg->xvip, XHLS_REG_ROWS, height);
+ } else {
+ xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
}
+ if (xtpg->vtc)
+ xtpg_config_vtc(xtpg, width, height);
/*
* Configure the bayer phase and video timing mux based on the
* operation mode (passthrough or test pattern generation). The test
@@ -215,7 +341,11 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
*/
mutex_lock(xtpg->ctrl_handler.lock);
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BG_PATTERN,
+ xtpg->pattern->cur.val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
XTPG_PATTERN_MASK, xtpg->pattern->cur.val);
/*
@@ -229,18 +359,26 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_unlock(xtpg->ctrl_handler.lock);
- /*
- * For TPG v5.0, the bayer phase needs to be off for the pass through
- * mode, otherwise the external input would be subsampled.
- */
- bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
- : xtpg_get_bayer_phase(xtpg->formats[0].code);
- xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
-
if (xtpg->vtmux_gpio)
gpiod_set_value_cansleep(xtpg->vtmux_gpio, !passthrough);
- xvip_start(&xtpg->xvip);
+ if (xtpg->is_hls) {
+ xvip_set(&xtpg->xvip, XTPG_HLS_ENABLE_INPUT,
+ XTPG_HLS_USE_INPUT_VID_STREAM);
+ xvip_set(&xtpg->xvip, XVIP_CTRL_CONTROL,
+ XHLS_REG_CTRL_AUTO_RESTART |
+ XVIP_CTRL_CONTROL_SW_ENABLE);
+ } else {
+ /*
+ * For TPG v5.0, the bayer phase needs to be off for the pass
+ * through mode, otherwise the external input would
+ * be subsampled.
+ */
+ bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
+ : xtpg_get_bayer_phase(xtpg->formats[0].code);
+ xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
+ xvip_start(&xtpg->xvip);
+ }
return 0;
}
@@ -300,7 +438,27 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
__format->code = fmt->format.code;
}
- xvip_set_format_size(__format, fmt);
+ if (xtpg->is_hls) {
+ switch (fmt->format.code) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ __format->code = fmt->format.code;
+ break;
+ default:
+ __format->code = xtpg->default_format.code;
+ }
+ }
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XTPG_MIN_WIDTH, xtpg->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XTPG_MIN_HEIGHT, xtpg->max_height);
fmt->format = *__format;
@@ -322,6 +480,7 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
+ struct xtpg_device *xtpg = to_tpg(subdev);
format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
@@ -330,12 +489,13 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
/* Min / max values for pad 0 are always fixed in both one and two pads
* modes. In two pads mode, the source pad (= 1) size is identical to
- * the sink pad size */
+ * the sink pad size.
+ */
if (fse->pad == 0) {
- fse->min_width = XVIP_MIN_WIDTH;
- fse->max_width = XVIP_MAX_WIDTH;
- fse->min_height = XVIP_MIN_HEIGHT;
- fse->max_height = XVIP_MAX_HEIGHT;
+ fse->min_width = XTPG_MIN_WIDTH;
+ fse->max_width = xtpg->max_width;
+ fse->min_height = XTPG_MIN_HEIGHT;
+ fse->max_height = xtpg->max_height;
} else {
fse->min_width = format->width;
fse->max_width = format->width;
@@ -374,8 +534,12 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
- XTPG_PATTERN_MASK, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BG_PATTERN,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIRS:
xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
@@ -386,10 +550,13 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
XTPG_PATTERN_CONTROL_MOVING_BOX, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_COLOR_MASK:
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
- XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
- ctrl->val <<
- XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_MASK_ID, ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
+ ctrl->val <<
+ XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_STUCK_PIXEL:
xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
@@ -404,43 +571,85 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
XTPG_PATTERN_CONTROL_MOTION, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_MOTION_SPEED:
- xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_MOTION_SPEED,
+ ctrl->val);
+ else
+ xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW:
- xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
- XTPG_CROSS_HAIRS_ROW_MASK,
- ctrl->val << XTPG_CROSS_HAIRS_ROW_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_CROSS_HAIR_HOR,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_ROW_MASK,
+ ctrl->val <<
+ XTPG_CROSS_HAIRS_ROW_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN:
- xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
- XTPG_CROSS_HAIRS_COLUMN_MASK,
- ctrl->val << XTPG_CROSS_HAIRS_COLUMN_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_CROSS_HAIR_VER,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_COLUMN_MASK,
+ ctrl->val <<
+ XTPG_CROSS_HAIRS_COLUMN_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_HOR_START:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
- XTPG_ZPLATE_START_MASK,
- ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_HOR_CNTL_START,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
- XTPG_ZPLATE_SPEED_MASK,
- ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_HOR_CNTL_DELTA,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_VER_START:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
- XTPG_ZPLATE_START_MASK,
- ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_VER_CNTL_START,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
- XTPG_ZPLATE_SPEED_MASK,
- ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_VER_CNTL_DELTA,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_BOX_SIZE:
- xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_SIZE, ctrl->val);
+ else
+ xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_BOX_COLOR:
- xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ if (xtpg->is_hls) {
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_RED_CB,
+ ctrl->val >> 16);
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_GREEN_CR,
+ ctrl->val >> 8);
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_BLUE_Y,
+ ctrl->val);
+ } else {
+ xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ }
return 0;
case V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH:
xvip_write(&xtpg->xvip, XTPG_STUCK_PIXEL_THRESH, ctrl->val);
@@ -448,6 +657,9 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_XILINX_TPG_NOISE_GAIN:
xvip_write(&xtpg->xvip, XTPG_NOISE_GAIN, ctrl->val);
return 0;
+ case V4L2_CID_XILINX_TPG_HLS_FG_PATTERN:
+ xvip_write(&xtpg->xvip, XTPG_HLS_FG_PATTERN, ctrl->val);
+ return 0;
}
return 0;
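The V4L2_CID_XILINX_TPG_BOX_COLOR case above splits a single 24-bit control value across three byte-wide HLS registers. An illustrative decomposition (not driver code):

    u32 val = 0x80ff40;                /* hypothetical control value */
    u8 red_cb   = (val >> 16) & 0xff;  /* R (RGB) or Cb (YCbCr) */
    u8 green_cr = (val >> 8) & 0xff;   /* G or Cr */
    u8 blue_y   = val & 0xff;          /* B or Y */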
@@ -461,6 +673,8 @@ static const struct v4l2_subdev_core_ops xtpg_core_ops = {
};
static const struct v4l2_subdev_video_ops xtpg_video_ops = {
+ .g_frame_interval = xtpg_g_frame_interval,
+ .s_frame_interval = xtpg_s_frame_interval,
.s_stream = xtpg_s_stream,
};
@@ -505,60 +719,51 @@ static const char *const xtpg_pattern_strings[] = {
"Black/White Checker Board",
};
-static struct v4l2_ctrl_config xtpg_ctrls[] = {
+static const char *const xtpg_hls_pattern_strings[] = {
+ "Passthrough",
+ "Horizontal Ramp",
+ "Vertical Ramp",
+ "Temporal Ramp",
+ "Solid Red",
+ "Solid Green",
+ "Solid Blue",
+ "Solid Black",
+ "Solid White",
+ "Color Bars",
+ "Zone Plate",
+ "Tartan Color Bars",
+ "Cross Hatch",
+ "Color Sweep",
+ "Vertical/Horizontal Ramps",
+ "Black/White Checker Board",
+ "PseudoRandom",
+};
+
+static const char *const xtpg_hls_fg_strings[] = {
+ "No Overlay",
+ "Moving Box",
+ "Cross Hairs",
+};
+
+static const struct v4l2_ctrl_config xtpg_hls_fg_ctrl = {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_HLS_FG_PATTERN,
+ .name = "Test Pattern: Foreground Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(xtpg_hls_fg_strings) - 1,
+ .qmenu = xtpg_hls_fg_strings,
+};
+
+static struct v4l2_ctrl_config xtpg_common_ctrls[] = {
{
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
- .name = "Test Pattern: Cross Hairs",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
- .name = "Test Pattern: Moving Box",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
- .name = "Test Pattern: Color Mask",
- .type = V4L2_CTRL_TYPE_BITMASK,
- .min = 0,
- .max = 0xf,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
- .name = "Test Pattern: Stuck Pixel",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_NOISE,
- .name = "Test Pattern: Noise",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_MOTION,
- .name = "Test Pattern: Motion",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
+ .name = "Test Pattern: Color Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = 0x7,
+ .def = 0,
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_MOTION_SPEED,
@@ -642,12 +847,61 @@ static struct v4l2_ctrl_config xtpg_ctrls[] = {
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_BOX_COLOR,
- .name = "Test Pattern: Box Color(RGB)",
+ .name = "Test Pattern: Box Color(RGB/YCbCr)",
.type = V4L2_CTRL_TYPE_INTEGER,
.min = 0,
.max = (1 << 24) - 1,
.step = 1,
.def = 0,
+ },
+};
+
+static struct v4l2_ctrl_config xtpg_ctrls[] = {
+ {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
+ .name = "Test Pattern: Cross Hairs",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
+ .name = "Test Pattern: Moving Box",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
+ .name = "Test Pattern: Stuck Pixel",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE,
+ .name = "Test Pattern: Noise",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION,
+ .name = "Test Pattern: Motion",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH,
@@ -713,6 +967,49 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
struct device_node *port;
unsigned int nports = 0;
bool has_endpoint = false;
+ int ret;
+
+ if (!of_device_is_compatible(dev->of_node, "xlnx,v-tpg-5.0"))
+ xtpg->is_hls = true;
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xtpg->max_height);
+ if (ret < 0) {
+ if (of_device_is_compatible(dev->of_node, "xlnx,v-tpg-8.0")) {
+ dev_err(dev, "xlnx,max-height dt property is missing!");
+ return -EINVAL;
+ }
+ xtpg->max_height = XTPG_MAX_HEIGHT;
+ } else if (xtpg->max_height > XTPG_MAX_HEIGHT ||
+ xtpg->max_height < XTPG_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xtpg->max_width);
+ if (ret < 0) {
+ if (of_device_is_compatible(dev->of_node, "xlnx,v-tpg-8.0")) {
+ dev_err(dev, "xlnx,max-width dt property is missing!");
+ return -EINVAL;
+ }
+ xtpg->max_width = XTPG_MAX_WIDTH;
+ } else if (xtpg->max_width > XTPG_MAX_WIDTH ||
+ xtpg->max_width < XTPG_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,ppc",
+ &xtpg->ppc);
+ if (ret < 0) {
+ xtpg->ppc = XTPG_MIN_PPC;
+ dev_dbg(dev, "failed to read ppc in dt\n");
+ } else if ((xtpg->ppc != 1) && (xtpg->ppc != 2) &&
+ (xtpg->ppc != 4) && (xtpg->ppc != 8)) {
+ dev_err(dev, "Invalid ppc config in dt\n");
+ return -EINVAL;
+ }
ports = of_get_child_by_name(node, "ports");
if (ports == NULL)
@@ -769,6 +1066,7 @@ static int xtpg_probe(struct platform_device *pdev)
struct v4l2_subdev *subdev;
struct xtpg_device *xtpg;
u32 i, bayer_phase;
+ u32 npatterns;
int ret;
xtpg = devm_kzalloc(&pdev->dev, sizeof(*xtpg), GFP_KERNEL);
@@ -792,14 +1090,29 @@ static int xtpg_probe(struct platform_device *pdev)
goto error_resource;
}
+ if (xtpg->is_hls) {
+ xtpg->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xtpg->rst_gpio)) {
+ ret = PTR_ERR(xtpg->rst_gpio);
+ goto error_resource;
+ }
+ }
+
xtpg->vtc = xvtc_of_get(pdev->dev.of_node);
if (IS_ERR(xtpg->vtc)) {
ret = PTR_ERR(xtpg->vtc);
goto error_resource;
}
- /* Reset and initialize the core */
- xvip_reset(&xtpg->xvip);
+ /*
+ * Reset and initialize the core. The HLS TPG version has
+ * no SW_RESET bit, hence the GPIO based reset is used.
+ */
+ if (xtpg->is_hls)
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x0);
+ else
+ xvip_reset(&xtpg->xvip);
/* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
* number of pads.
@@ -815,11 +1128,23 @@ static int xtpg_probe(struct platform_device *pdev)
xtpg->default_format.code = xtpg->vip_format->code;
xtpg->default_format.field = V4L2_FIELD_NONE;
xtpg->default_format.colorspace = V4L2_COLORSPACE_SRGB;
- xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
- bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
- if (bayer_phase != XTPG_BAYER_PHASE_OFF)
- xtpg->bayer = true;
+ if (xtpg->is_hls) {
+ npatterns = ARRAY_SIZE(xtpg_hls_pattern_strings);
+ xtpg->default_format.width = xvip_read(&xtpg->xvip,
+ XHLS_REG_COLS);
+ xtpg->default_format.height = xvip_read(&xtpg->xvip,
+ XHLS_REG_ROWS);
+ } else {
+ npatterns = ARRAY_SIZE(xtpg_pattern_strings);
+ xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
+ }
+
+ if (!xtpg->is_hls) {
+ bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ xtpg->bayer = true;
+ }
xtpg->formats[0] = xtpg->default_format;
if (xtpg->npads == 2)
@@ -839,7 +1164,13 @@ static int xtpg_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
- v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 + ARRAY_SIZE(xtpg_ctrls));
+ if (xtpg->is_hls)
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 4 +
+ ARRAY_SIZE(xtpg_common_ctrls));
+ else
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 +
+ ARRAY_SIZE(xtpg_common_ctrls) +
+ ARRAY_SIZE(xtpg_ctrls));
xtpg->vblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
V4L2_CID_VBLANK, XTPG_MIN_VBLANK,
@@ -847,19 +1178,41 @@ static int xtpg_probe(struct platform_device *pdev)
xtpg->hblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
V4L2_CID_HBLANK, XTPG_MIN_HBLANK,
XTPG_MAX_HBLANK, 1, 100);
- xtpg->pattern = v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
- &xtpg_ctrl_ops, V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(xtpg_pattern_strings) - 1,
- 1, 9, xtpg_pattern_strings);
- for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
- v4l2_ctrl_new_custom(&xtpg->ctrl_handler, &xtpg_ctrls[i], NULL);
+ if (xtpg->is_hls) {
+ xtpg->pattern =
+ v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ npatterns - 1,
+ 1, 9,
+ xtpg_hls_pattern_strings);
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_hls_fg_ctrl, NULL);
+ } else {
+ xtpg->pattern =
+ v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ npatterns - 1,
+ 1, 9,
+ xtpg_pattern_strings);
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_ctrls[i], NULL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_common_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_common_ctrls[i], NULL);
if (xtpg->ctrl_handler.error) {
dev_err(&pdev->dev, "failed to add controls\n");
ret = xtpg->ctrl_handler.error;
goto error;
}
+
subdev->ctrl_handler = &xtpg->ctrl_handler;
xtpg_update_pattern_control(xtpg, true, true);
@@ -874,6 +1227,10 @@ static int xtpg_probe(struct platform_device *pdev)
xvip_print_version(&xtpg->xvip);
+ /* Initialize default frame interval */
+ xtpg->fi_n = 1;
+ xtpg->fi_d = 30;
+
ret = v4l2_async_register_subdev(subdev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register subdev\n");
@@ -909,6 +1266,8 @@ static SIMPLE_DEV_PM_OPS(xtpg_pm_ops, xtpg_pm_suspend, xtpg_pm_resume);
static const struct of_device_id xtpg_of_id_table[] = {
{ .compatible = "xlnx,v-tpg-5.0" },
+ { .compatible = "xlnx,v-tpg-7.0" },
+ { .compatible = "xlnx,v-tpg-8.0" },
{ }
};
MODULE_DEVICE_TABLE(of, xtpg_of_id_table);
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 08a825c3a3f6..466d6f27af1d 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -24,22 +24,102 @@
*/
static const struct xvip_video_format xvip_video_formats[] = {
+ { XVIP_VF_YUV_420, 8, NULL, MEDIA_BUS_FMT_VYYUYY8_1X24,
+ 1, 12, V4L2_PIX_FMT_NV12, 2, 1, 1, 2, "4:2:0, semi-planar, YUV" },
+ { XVIP_VF_YUV_420, 8, NULL, MEDIA_BUS_FMT_VYYUYY8_1X24,
+ 1, 12, V4L2_PIX_FMT_NV12M, 2, 2, 1, 2, "4:2:0, 2-plane non-cont" },
+ { XVIP_VF_YUV_420, 10, NULL, MEDIA_BUS_FMT_VYYUYY10_4X20,
+ 1, 12, V4L2_PIX_FMT_XV15, 2, 1, 2, 2, "4:2:0, 10-bit 2-plane cont" },
+ { XVIP_VF_YUV_420, 10, NULL, MEDIA_BUS_FMT_VYYUYY10_4X20,
+ 1, 12, V4L2_PIX_FMT_XV15M, 2, 2, 1, 2, "4:2:0, 10-bit 2-plane non-cont" },
+ { XVIP_VF_YUV_420, 12, NULL, MEDIA_BUS_FMT_UYYVYY12_4X24,
+ 1, 12, V4L2_PIX_FMT_X012, 2, 1, 2, 2, "4:2:0, 12-bit 2-plane cont" },
+ { XVIP_VF_YUV_420, 12, NULL, MEDIA_BUS_FMT_UYYVYY12_4X24,
+ 1, 12, V4L2_PIX_FMT_X012M, 2, 2, 1, 2, "4:2:0, 12-bit 2-plane non-cont" },
+ { XVIP_VF_YUV_420, 16, NULL, MEDIA_BUS_FMT_UYYVYY16_4X32,
+ 2, 12, V4L2_PIX_FMT_X016, 2, 1, 2, 2, "4:2:0, 16-bit 2-plane cont" },
+ { XVIP_VF_YUV_420, 16, NULL, MEDIA_BUS_FMT_UYYVYY16_4X32,
+ 2, 12, V4L2_PIX_FMT_X016M, 2, 2, 1, 2, "4:2:0, 16-bit 2-plane non-cont" },
{ XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
- 2, V4L2_PIX_FMT_YUYV, "4:2:2, packed, YUYV" },
+ 2, 16, V4L2_PIX_FMT_YUYV, 1, 1, 2, 1, "4:2:2, packed, YUYV" },
+ { XVIP_VF_VUY_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, 16, V4L2_PIX_FMT_UYVY, 1, 1, 2, 1, "4:2:2, packed, UYVY" },
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 1, 16, V4L2_PIX_FMT_NV16, 2, 1, 1, 1, "4:2:2, semi-planar, YUV" },
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 1, 16, V4L2_PIX_FMT_NV16M, 2, 2, 1, 1, "4:2:2, 2-plane non-contiguous" },
+ { XVIP_VF_YUV_422, 10, NULL, MEDIA_BUS_FMT_UYVY10_1X20,
+ 1, 16, V4L2_PIX_FMT_XV20, 2, 1, 2, 1, "4:2:2, 10-bit 2-plane cont" },
+ { XVIP_VF_YUV_422, 10, NULL, MEDIA_BUS_FMT_UYVY10_1X20,
+ 1, 16, V4L2_PIX_FMT_XV20M, 2, 2, 1, 1, "4:2:2, 10-bit 2-plane non-cont" },
+ { XVIP_VF_YUV_422, 12, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 1, 16, V4L2_PIX_FMT_X212, 2, 1, 2, 1, "4:2:2, 12-bit 2-plane cont" },
+ { XVIP_VF_YUV_422, 12, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 1, 16, V4L2_PIX_FMT_X212M, 2, 2, 1, 1, "4:2:2, 12-bit 2-plane non-cont" },
+ { XVIP_VF_YUV_422, 16, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 2, 16, V4L2_PIX_FMT_X216, 2, 1, 2, 1, "4:2:2, 16-bit 2-plane cont" },
+ { XVIP_VF_YUV_422, 16, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 2, 16, V4L2_PIX_FMT_X216M, 2, 2, 1, 1, "4:2:2, 16-bit 2-plane non-cont" },
{ XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
- 3, V4L2_PIX_FMT_YUV444, "4:4:4, packed, YUYV" },
+ 3, 24, V4L2_PIX_FMT_VUY24, 1, 1, 1, 1, "4:4:4, packed, VUY" },
+ { XVIP_VF_YUVX, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
+ 4, 32, V4L2_PIX_FMT_XVUY32, 1, 1, 1, 1, "X:4:4:4, packed, XVUY" },
+ { XVIP_VF_YUVX, 10, NULL, MEDIA_BUS_FMT_VUY10_1X30,
+ 3, 32, V4L2_PIX_FMT_XVUY10, 1, 1, 1, 1, "2:10:10:10, packed, XVUY" },
+ { XVIP_VF_YUV_444, 12, NULL, MEDIA_BUS_FMT_VUY12_1X36,
+ 1, 24, V4L2_PIX_FMT_X412, 1, 1, 1, 1, "4:4:4, 12-bit 2-plane cont" },
+ { XVIP_VF_YUV_444, 12, NULL, MEDIA_BUS_FMT_VUY12_1X36,
+ 1, 24, V4L2_PIX_FMT_X412M, 1, 1, 1, 1, "4:4:4, 12-bit 2-plane non-cont" },
+ { XVIP_VF_YUV_444, 16, NULL, MEDIA_BUS_FMT_VUY16_1X48,
+ 2, 24, V4L2_PIX_FMT_X416, 1, 1, 1, 1, "4:4:4, 16-bit 2-plane cont" },
+ { XVIP_VF_YUV_444, 16, NULL, MEDIA_BUS_FMT_VUY16_1X48,
+ 2, 24, V4L2_PIX_FMT_X416M, 1, 1, 1, 1, "4:4:4, 16-bit 2-plane non-cont" },
+ { XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 3, 24, V4L2_PIX_FMT_BGR24, 1, 1, 1, 1, "24-bit RGB" },
{ XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
- 3, 0, NULL },
+ 3, 24, V4L2_PIX_FMT_RGB24, 1, 1, 1, 1, "24-bit RGB" },
+ { XVIP_VF_BGRX, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 4, 32, V4L2_PIX_FMT_BGRX32, 1, 1, 1, 1, "x:8:8:8 RGB w/8 bits padding" },
+ { XVIP_VF_XRGB, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 4, 32, V4L2_PIX_FMT_XBGR32, 1, 1, 1, 1, "8:8:8:x RGBx w/8 bits padding" },
+ { XVIP_VF_XBGR, 10, NULL, MEDIA_BUS_FMT_RBG101010_1X30,
+ 3, 32, V4L2_PIX_FMT_XBGR30, 1, 1, 1, 1, "2:10:10:10, packed, XBGR" },
+ { XVIP_VF_XBGR, 12, NULL, MEDIA_BUS_FMT_RBG121212_1X36,
+ 3, 40, V4L2_PIX_FMT_XBGR40, 1, 1, 1, 1, "4:12:12:12, packed, XBGR" },
+ { XVIP_VF_RBG, 16, NULL, MEDIA_BUS_FMT_RBG161616_1X48,
+ 6, 48, V4L2_PIX_FMT_BGR48, 1, 1, 1, 1, "48-bit RGB" },
{ XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
- 1, V4L2_PIX_FMT_GREY, "Greyscale 8-bit" },
+ 1, 8, V4L2_PIX_FMT_GREY, 1, 1, 1, 1, "Greyscale 8-bit" },
+ { XVIP_VF_Y_GREY, 10, NULL, MEDIA_BUS_FMT_Y10_1X10,
+ 4, 32, V4L2_PIX_FMT_XY10, 1, 1, 1, 1, "2:10:10:10, Grey, xY1Y2Y3Y4" },
+ { XVIP_VF_Y_GREY, 12, NULL, MEDIA_BUS_FMT_Y12_1X12,
+ 1, 12, V4L2_PIX_FMT_XY12, 1, 1, 1, 1, "4:12:12:12, packed, xY1Y2Y3" },
+ { XVIP_VF_Y_GREY, 16, NULL, MEDIA_BUS_FMT_Y16_1X16,
+ 2, 16, V4L2_PIX_FMT_Y16, 1, 1, 1, 1, "Greyscale 16-bit" },
{ XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
- 1, V4L2_PIX_FMT_SRGGB8, "Bayer 8-bit RGGB" },
+ 1, 8, V4L2_PIX_FMT_SRGGB8, 1, 1, 1, 1, "Bayer 8-bit RGGB" },
{ XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
- 1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit GRBG" },
+ 1, 8, V4L2_PIX_FMT_SGRBG8, 1, 1, 1, 1, "Bayer 8-bit GRBG" },
{ XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
- 1, V4L2_PIX_FMT_SGBRG8, "Bayer 8-bit GBRG" },
+ 1, 8, V4L2_PIX_FMT_SGBRG8, 1, 1, 1, 1, "Bayer 8-bit GBRG" },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
- 1, V4L2_PIX_FMT_SBGGR8, "Bayer 8-bit BGGR" },
+ 1, 8, V4L2_PIX_FMT_SBGGR8, 1, 1, 1, 1, "Bayer 8-bit BGGR" },
+ { XVIP_VF_MONO_SENSOR, 12, "rggb", MEDIA_BUS_FMT_SRGGB12_1X12,
+ 1, 12, V4L2_PIX_FMT_SRGGB12, 1, 1, 1, 1, "Bayer 12-bit RGGB" },
+ { XVIP_VF_MONO_SENSOR, 12, "grbg", MEDIA_BUS_FMT_SGRBG12_1X12,
+ 1, 12, V4L2_PIX_FMT_SGRBG12, 1, 1, 1, 1, "Bayer 12-bit GRBG" },
+ { XVIP_VF_MONO_SENSOR, 12, "gbrg", MEDIA_BUS_FMT_SGBRG12_1X12,
+ 1, 12, V4L2_PIX_FMT_SGBRG12, 1, 1, 1, 1, "Bayer 12-bit GBRG" },
+ { XVIP_VF_MONO_SENSOR, 12, "bggr", MEDIA_BUS_FMT_SBGGR12_1X12,
+ 1, 12, V4L2_PIX_FMT_SBGGR12, 1, 1, 1, 1, "Bayer 12-bit BGGR" },
+ { XVIP_VF_MONO_SENSOR, 16, "rggb", MEDIA_BUS_FMT_SRGGB16_1X16,
+ 1, 16, V4L2_PIX_FMT_SRGGB16, 1, 1, 1, 1, "Bayer 16-bit RGGB" },
+ { XVIP_VF_MONO_SENSOR, 16, "grbg", MEDIA_BUS_FMT_SGRBG16_1X16,
+ 1, 16, V4L2_PIX_FMT_SGRBG16, 1, 1, 1, 1, "Bayer 16-bit GRBG" },
+ { XVIP_VF_MONO_SENSOR, 16, "gbrg", MEDIA_BUS_FMT_SGBRG16_1X16,
+ 1, 16, V4L2_PIX_FMT_SGBRG16, 1, 1, 1, 1, "Bayer 16-bit GBRG" },
+ { XVIP_VF_MONO_SENSOR, 16, "bggr", MEDIA_BUS_FMT_SBGGR12_1X12,
+ 1, 12, V4L2_PIX_FMT_SBGGR16, 1, 1, 1, 1, "Bayer 16-bit BGGR" },
};
/**
@@ -89,6 +169,87 @@ const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
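With the extended table above, a lookup now also yields plane count and chroma subsampling. A minimal sketch, assuming the NV12 entry resolves:

    const struct xvip_video_format *fmt;

    fmt = xvip_get_format_by_fourcc(V4L2_PIX_FMT_NV12);
    if (fmt) {
            /* hypothetical 1920x1080 frame; NV12 has hsub = 1, vsub = 2 */
            u32 chroma_w = 1920 / fmt->hsub;
            u32 chroma_h = 1080 / fmt->vsub;
    }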
/**
+ * xvip_bpl_scaling_factor - Retrieve bpl scaling factor for a 4CC
+ * @fourcc: the format 4CC
+ * @numerator: location where the numerator of the scaling factor is returned
+ * @denominator: location where the denominator of the scaling factor is
+ * returned
+ *
+ * Return: nothing. The values are returned through @numerator and
+ * @denominator.
+ */
+void xvip_bpl_scaling_factor(u32 fourcc, u32 *numerator, u32 *denominator)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XY10:
+ case V4L2_PIX_FMT_XV15:
+ case V4L2_PIX_FMT_XV20:
+ case V4L2_PIX_FMT_XV15M:
+ case V4L2_PIX_FMT_XV20M:
+ case V4L2_PIX_FMT_XBGR30:
+ case V4L2_PIX_FMT_XVUY10:
+ *numerator = 10;
+ *denominator = 8;
+ break;
+ case V4L2_PIX_FMT_XBGR40:
+ case V4L2_PIX_FMT_XY12:
+ case V4L2_PIX_FMT_X012:
+ case V4L2_PIX_FMT_X012M:
+ case V4L2_PIX_FMT_X212:
+ case V4L2_PIX_FMT_X212M:
+ case V4L2_PIX_FMT_X412:
+ case V4L2_PIX_FMT_X412M:
+ *numerator = 12;
+ *denominator = 8;
+ break;
+ default:
+ *numerator = 1;
+ *denominator = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(xvip_bpl_scaling_factor);
+
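A caller would typically scale a computed bytes-per-line value by the returned fraction. A sketch; the rounding policy here is an assumption, not taken from an in-tree caller:

    u32 num, den, bpl;
    u32 base_bpl = 3840;    /* hypothetical unscaled bytes per line */

    xvip_bpl_scaling_factor(V4L2_PIX_FMT_XV20, &num, &den);  /* 10/8 */
    bpl = DIV_ROUND_UP(base_bpl * num, den);                 /* 4800 */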
+/**
+ * xvip_width_padding_factor - Retrieve width's padding factor for a 4CC
+ * @fourcc: the format 4CC
+ * @numerator: location where the numerator of the padding factor is returned
+ * @denominator: location where the denominator of the padding factor is
+ * returned
+ *
+ * Return: nothing. The values are returned through @numerator and
+ * @denominator.
+ */
+void xvip_width_padding_factor(u32 fourcc, u32 *numerator, u32 *denominator)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XY10:
+ case V4L2_PIX_FMT_XV15:
+ case V4L2_PIX_FMT_XV20:
+ case V4L2_PIX_FMT_XV15M:
+ case V4L2_PIX_FMT_XV20M:
+ case V4L2_PIX_FMT_XBGR30:
+ case V4L2_PIX_FMT_XVUY10:
+ /* 32 bits are required per 30 bits of data */
+ *numerator = 32;
+ *denominator = 30;
+ break;
+ case V4L2_PIX_FMT_XBGR40:
+ case V4L2_PIX_FMT_XY12:
+ case V4L2_PIX_FMT_X012:
+ case V4L2_PIX_FMT_X012M:
+ case V4L2_PIX_FMT_X212:
+ case V4L2_PIX_FMT_X212M:
+ case V4L2_PIX_FMT_X412:
+ case V4L2_PIX_FMT_X412M:
+ *numerator = 40;
+ *denominator = 36;
+ break;
+ default:
+ *numerator = 1;
+ *denominator = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(xvip_width_padding_factor);
+
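Similarly, the padding factor expands a pixel width for container packing; for the 10-bit formats above, 32 container bits carry 30 data bits. A sketch:

    u32 num, den, padded;

    xvip_width_padding_factor(V4L2_PIX_FMT_XBGR30, &num, &den);  /* 32/30 */
    padded = 1920 * num / den;  /* hypothetical 1920-pixel line -> 2048 */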
+/**
* xvip_of_get_format - Parse a device tree node and return format information
* @node: the device tree node
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index ba939dd52818..55b994f0c26e 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -106,8 +106,13 @@ struct xvip_device {
* @width: AXI4 format width in bits per component
* @pattern: CFA pattern for Mono/Sensor formats
* @code: media bus format code
- * @bpp: bytes per pixel (when stored in memory)
+ * @bpl_factor: Bytes per line factor
+ * @bpp: bits per pixel
* @fourcc: V4L2 pixel format FCC identifier
+ * @num_planes: number of planes w.r.t. color format
+ * @buffers: number of buffers per format
+ * @hsub: Horizontal sampling factor of Chroma
+ * @vsub: Vertical sampling factor of Chroma
* @description: format description, suitable for userspace
*/
struct xvip_video_format {
@@ -115,14 +120,21 @@ struct xvip_video_format {
unsigned int width;
const char *pattern;
unsigned int code;
+ unsigned int bpl_factor;
unsigned int bpp;
u32 fourcc;
+ u8 num_planes;
+ u8 buffers;
+ u8 hsub;
+ u8 vsub;
const char *description;
};
const struct xvip_video_format *xvip_get_format_by_code(unsigned int code);
const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc);
const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
+void xvip_bpl_scaling_factor(u32 fourcc, u32 *numerator, u32 *denominator);
+void xvip_width_padding_factor(u32 fourcc, u32 *numerator, u32 *denominator);
void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
const struct v4l2_subdev_format *fmt);
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index edce0402155d..7890cc11632f 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -27,16 +27,27 @@
#define XVIPP_DMA_S2MM 0
#define XVIPP_DMA_MM2S 1
+/*
+ * This is for backward compatibility with existing applications
+ * and is planned to be deprecated.
+ */
+static bool xvip_is_mplane = true;
+MODULE_PARM_DESC(is_mplane,
+ "v4l2 device capability to handle multi planar formats");
+module_param_named(is_mplane, xvip_is_mplane, bool, 0444);
+
/**
* struct xvip_graph_entity - Entity in the video graph
* @asd: subdev asynchronous registration information
* @entity: media entity, from the corresponding V4L2 subdev
* @subdev: V4L2 subdev
+ * @streaming: whether the V4L2 subdev is streaming
*/
struct xvip_graph_entity {
struct v4l2_async_subdev asd; /* must be first */
struct media_entity *entity;
struct v4l2_subdev *subdev;
+ bool streaming;
};
static inline struct xvip_graph_entity *
@@ -182,6 +193,38 @@ xvip_graph_find_dma(struct xvip_composite_device *xdev, unsigned int port)
return NULL;
}
+/**
+ * xvip_subdev_set_streaming - Find and update streaming status of subdev
+ * @xdev: Composite video device
+ * @subdev: V4L2 sub-device
+ * @enable: enable/disable streaming status
+ *
+ * Walk the xvip graph entity list to check whether @subdev is present,
+ * return its current streaming status and update that status as requested.
+ *
+ * Return: the previous streaming status (true or false) on success. If the
+ * subdev is not present, WARN and return false.
+ */
+bool xvip_subdev_set_streaming(struct xvip_composite_device *xdev,
+ struct v4l2_subdev *subdev, bool enable)
+{
+ struct xvip_graph_entity *entity;
+ struct v4l2_async_subdev *asd;
+
+ list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ entity = to_xvip_entity(asd);
+ if (entity->asd.match.fwnode == of_fwnode_handle(subdev->dev->of_node)) {
+ bool status = entity->streaming;
+
+ entity->streaming = enable;
+ return status;
+ }
+ }
+
+ WARN(1, "Should never get here\n");
+ return false;
+}
+
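A hypothetical caller sketch, using the returned previous status so that s_stream(1) reaches a shared subdev only once even when several DMA paths cross it:

    if (!xvip_subdev_set_streaming(xdev, subdev, true))
            v4l2_subdev_call(subdev, video, s_stream, 1);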
static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
{
u32 link_flags = MEDIA_LNK_FL_ENABLED;
@@ -276,7 +319,6 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
}
}
- of_node_put(ep);
return ret;
}
@@ -442,9 +484,11 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
return ret;
if (strcmp(direction, "input") == 0)
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ type = xvip_is_mplane ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (strcmp(direction, "output") == 0)
- type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ type = xvip_is_mplane ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
else
return -EINVAL;
@@ -462,8 +506,14 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
list_add_tail(&dma->list, &xdev->dmas);
- xdev->v4l2_caps |= type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
@@ -594,6 +644,7 @@ static int xvip_composite_probe(struct platform_device *pdev)
return -ENOMEM;
xdev->dev = &pdev->dev;
+ mutex_init(&xdev->lock);
INIT_LIST_HEAD(&xdev->dmas);
v4l2_async_notifier_init(&xdev->notifier);
@@ -620,6 +671,7 @@ static int xvip_composite_remove(struct platform_device *pdev)
{
struct xvip_composite_device *xdev = platform_get_drvdata(pdev);
+ mutex_destroy(&xdev->lock);
xvip_graph_cleanup(xdev);
xvip_composite_v4l2_cleanup(xdev);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index e65fce9538f9..24934d57529b 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -27,6 +27,7 @@
* @notifier: V4L2 asynchronous subdevs notifier
* @dmas: list of DMA channels at the pipeline output and input
* @v4l2_caps: V4L2 capabilities of the whole device (see VIDIOC_QUERYCAP)
+ * @lock: lock to ensure that all DMA path entities acquire the same pipeline object
*/
struct xvip_composite_device {
struct v4l2_device v4l2_dev;
@@ -37,6 +38,10 @@ struct xvip_composite_device {
struct list_head dmas;
u32 v4l2_caps;
+ struct mutex lock; /* lock to protect xvip pipeline instance */
};
+bool xvip_subdev_set_streaming(struct xvip_composite_device *xdev,
+ struct v4l2_subdev *subdev, bool enable);
+
#endif /* __XILINX_VIPP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vpss-csc.c b/drivers/media/platform/xilinx/xilinx-vpss-csc.c
new file mode 100644
index 000000000000..2c79b6ec1b8b
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vpss-csc.c
@@ -0,0 +1,1169 @@
+/*
+ * Xilinx VPSS Color Space Converter
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+
+#define XV_CSC_FRACTIONAL_BITS (12)
+#define XV_CSC_SCALE_FACTOR (4096)
+/* This is a VPSS CSC specific macro used to calculate contrast */
+#define XV_CSC_DIVISOR (10000)
+#define XV_CSC_DEFAULT_HEIGHT (720)
+#define XV_CSC_DEFAULT_WIDTH (1280)
+#define XV_CSC_K_MAX_ROWS (3)
+#define XV_CSC_K_MAX_COLUMNS (3)
+#define XV_CSC_MIN_WIDTH (64)
+#define XV_CSC_MAX_WIDTH (8192)
+#define XV_CSC_MIN_HEIGHT (64)
+#define XV_CSC_MAX_HEIGHT (4320)
+
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+/* Color Control Macros */
+#define XCSC_COLOR_CTRL_COUNT (5)
+#define XCSC_COLOR_CTRL_DEFAULT (50)
+
+enum xcsc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+enum xcsc_output_range {
+ XVIDC_CR_0_255 = 1,
+ XVIDC_CR_16_240,
+ XVIDC_CR_16_235
+};
+
+enum xcsc_color_depth {
+ XVIDC_BPC_8 = 8,
+ XVIDC_BPC_10 = 10,
+};
+
+static const s32
+rgb_unity_matrix[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {XV_CSC_SCALE_FACTOR, 0, 0, 0},
+ {0, XV_CSC_SCALE_FACTOR, 0, 0},
+ {0, 0, XV_CSC_SCALE_FACTOR, 0},
+};
+
+static const s32
+ycrcb_to_rgb_unity[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ 0
+ },
+};
+
+static const s32
+rgb_to_ycrcb_unity[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {
+ 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ },
+};
+
+/**
+ * struct xcsc_dev - xilinx vpss csc device structure
+ * @xvip: Xilinx Video IP core struct
+ * @pads: Media bus pads for VPSS CSC
+ * @formats: Current media bus formats
+ * @default_formats: Default media bus formats for VPSS CSC
+ * @vip_formats: Pointer to DT specified media bus code info
+ * @ctrl_handler: V4L2 Control Handler struct
+ * @custom_ctrls: Array of pointers to various custom controls
+ * @cft_in: IP or Hardware specific input video format
+ * @cft_out: IP or Hardware specific output video format
+ * @output_range: Color range for Outgoing video
+ * @color_depth: Data width used to represent color
+ * @brightness: Expected brightness value
+ * @contrast: Expected contrast value
+ * @red_gain: Expected red gain
+ * @green_gain: Expected green gain
+ * @blue_gain: Expected blue gain
+ * @brightness_active: Current brightness value
+ * @contrast_active: Current contrast value
+ * @red_gain_active: Current red gain
+ * @green_gain_active: Current green gain
+ * @blue_gain_active: Current blue gain
+ * @k_hw: Coefficients to be written to the IP/hardware
+ * @shadow_coeff: Coefficients to track RGB equivalents for color controls
+ * @clip_max: Maximum value to clip output color range
+ * @rst_gpio: Handle to PS GPIO specifier to assert/de-assert the reset line
+ * @max_width: Maximum width supported by IP.
+ * @max_height: Maximum height supported by IP.
+ */
+struct xcsc_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *custom_ctrls[XCSC_COLOR_CTRL_COUNT];
+
+ enum xcsc_color_fmt cft_in;
+ enum xcsc_color_fmt cft_out;
+ enum xcsc_output_range output_range;
+ enum xcsc_color_depth color_depth;
+ s32 brightness;
+ s32 contrast;
+ s32 red_gain;
+ s32 green_gain;
+ s32 blue_gain;
+ s32 brightness_active;
+ s32 contrast_active;
+ s32 red_gain_active;
+ s32 green_gain_active;
+ s32 blue_gain_active;
+ s32 k_hw[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1];
+ s32 shadow_coeff[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1];
+ s32 clip_max;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+#ifdef DEBUG
+static u32 xcsc_read(struct xcsc_dev *xcsc, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xcsc->xvip, reg);
+ return data;
+}
+
+static void xcsc_get_coeff(struct xcsc_dev *xcsc, s32 C[3][4])
+{
+ C[0][0] = xcsc_read(xcsc, XV_CSC_K11);
+ C[0][1] = xcsc_read(xcsc, XV_CSC_K12);
+ C[0][2] = xcsc_read(xcsc, XV_CSC_K13);
+ C[1][0] = xcsc_read(xcsc, XV_CSC_K21);
+ C[1][1] = xcsc_read(xcsc, XV_CSC_K22);
+ C[1][2] = xcsc_read(xcsc, XV_CSC_K23);
+ C[2][0] = xcsc_read(xcsc, XV_CSC_K31);
+ C[2][1] = xcsc_read(xcsc, XV_CSC_K32);
+ C[2][2] = xcsc_read(xcsc, XV_CSC_K33);
+ C[0][3] = xcsc_read(xcsc, XV_CSC_ROFFSET);
+ C[1][3] = xcsc_read(xcsc, XV_CSC_GOFFSET);
+ C[2][3] = xcsc_read(xcsc, XV_CSC_BOFFSET);
+}
+
+static void xcsc_print_coeff(struct xcsc_dev *xcsc)
+{
+ s32 C[3][4];
+
+ xcsc_get_coeff(xcsc, C);
+
+ dev_info(xcsc->xvip.dev,
+ "-------------CSC Coeff Dump Start------\n");
+ dev_info(xcsc->xvip.dev,
+ " R row : %5d %5d %5d\n",
+ (s16)C[0][0], (s16)C[0][1], (s16)C[0][2]);
+ dev_info(xcsc->xvip.dev,
+ " G row : %5d %5d %5d\n",
+ (s16)C[1][0], (s16)C[1][1], (s16)C[1][2]);
+ dev_info(xcsc->xvip.dev,
+ " B row : %5d %5d %5d\n",
+ (s16)C[2][0], (s16)C[2][1], (s16)C[2][2]);
+ dev_info(xcsc->xvip.dev,
+ "Offset : %5d %5d %5d\n",
+ (s16)C[0][3], (s16)C[1][3], (s16)C[2][3]);
+ dev_info(xcsc->xvip.dev,
+ "ClampMin: %3d ClipMax %3d",
+ xcsc_read(xcsc, XV_CSC_CLAMPMIN),
+ xcsc_read(xcsc, XV_CSC_CLIPMAX));
+ dev_info(xcsc->xvip.dev,
+ "-------------CSC Coeff Dump Stop-------\n");
+}
+
+static void
+xcsc_log_coeff(struct device *dev,
+ s32 coeff[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ if (!dev)
+ return;
+ dev_dbg(dev, "--- %s : Start Coeff Log ---", __func__);
+ dev_dbg(dev, "R row : %5d %5d %5d\n",
+ coeff[0][0], coeff[0][1], coeff[0][2]);
+ dev_dbg(dev, "G row : %5d %5d %5d\n",
+ coeff[1][0], coeff[1][1], coeff[1][2]);
+ dev_dbg(dev, "B row : %5d %5d %5d\n",
+ coeff[2][0], coeff[2][1], coeff[2][2]);
+ dev_dbg(dev, "Offset: %5d %5d %5d\n",
+ coeff[0][3], coeff[1][3], coeff[2][3]);
+ dev_dbg(dev, "--- %s : Stop Coeff Log ---", __func__);
+}
+
+static void xcsc_print_k_hw(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "-------------CSC Driver k_hw[][] Dump------------\n");
+ xcsc_log_coeff(xcsc->xvip.dev, xcsc->k_hw);
+ dev_dbg(xcsc->xvip.dev,
+ "-------------------------------------------------\n");
+}
+#endif /* DEBUG */
+
+static void xcsc_write(struct xcsc_dev *xcsc, u32 reg, u32 data)
+{
+ xvip_write(&xcsc->xvip, reg, data);
+}
+
+static void xcsc_write_rgb_3x3(struct xcsc_dev *xcsc)
+{
+ /* Write Matrix Coefficients */
+ xcsc_write(xcsc, XV_CSC_K11, xcsc->k_hw[0][0]);
+ xcsc_write(xcsc, XV_CSC_K12, xcsc->k_hw[0][1]);
+ xcsc_write(xcsc, XV_CSC_K13, xcsc->k_hw[0][2]);
+ xcsc_write(xcsc, XV_CSC_K21, xcsc->k_hw[1][0]);
+ xcsc_write(xcsc, XV_CSC_K22, xcsc->k_hw[1][1]);
+ xcsc_write(xcsc, XV_CSC_K23, xcsc->k_hw[1][2]);
+ xcsc_write(xcsc, XV_CSC_K31, xcsc->k_hw[2][0]);
+ xcsc_write(xcsc, XV_CSC_K32, xcsc->k_hw[2][1]);
+ xcsc_write(xcsc, XV_CSC_K33, xcsc->k_hw[2][2]);
+}
+
+static void xcsc_write_rgb_offset(struct xcsc_dev *xcsc)
+{
+ /* Write RGB Offsets */
+ xcsc_write(xcsc, XV_CSC_ROFFSET, xcsc->k_hw[0][3]);
+ xcsc_write(xcsc, XV_CSC_GOFFSET, xcsc->k_hw[1][3]);
+ xcsc_write(xcsc, XV_CSC_BOFFSET, xcsc->k_hw[2][3]);
+}
+
+static void xcsc_write_coeff(struct xcsc_dev *xcsc)
+{
+ xcsc_write_rgb_3x3(xcsc);
+ xcsc_write_rgb_offset(xcsc);
+}
+
+static void xcsc_set_v4l2_ctrl_defaults(struct xcsc_dev *xcsc)
+{
+ unsigned int i;
+
+ mutex_lock(xcsc->ctrl_handler.lock);
+ for (i = 0; i < XCSC_COLOR_CTRL_COUNT; i++)
+ xcsc->custom_ctrls[i]->cur.val = XCSC_COLOR_CTRL_DEFAULT;
+ mutex_unlock(xcsc->ctrl_handler.lock);
+}
+
+static void xcsc_set_control_defaults(struct xcsc_dev *xcsc)
+{
+ /* These are VPSS CSC IP specific defaults */
+ xcsc->brightness = 120;
+ xcsc->contrast = 0;
+ xcsc->red_gain = 120;
+ xcsc->blue_gain = 120;
+ xcsc->green_gain = 120;
+ xcsc->brightness_active = 120;
+ xcsc->contrast_active = 0;
+ xcsc->red_gain_active = 120;
+ xcsc->blue_gain_active = 120;
+ xcsc->green_gain_active = 120;
+}
+
+static void xcsc_copy_coeff(
+ s32 dest[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 const src[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ unsigned int i, j;
+
+ for (i = 0; i < XV_CSC_K_MAX_ROWS; i++)
+ for (j = 0; j < XV_CSC_K_MAX_COLUMNS + 1; j++)
+ memcpy(&dest[i][j], &src[i][j], sizeof(dest[0][0]));
+}
+
+static void xcsc_set_unity_matrix(struct xcsc_dev *xcsc)
+{
+ xcsc_copy_coeff(xcsc->k_hw, rgb_unity_matrix);
+ xcsc_copy_coeff(xcsc->shadow_coeff, rgb_unity_matrix);
+}
+
+static void xcsc_set_default_state(struct xcsc_dev *xcsc)
+{
+ xcsc->cft_in = XVIDC_CSF_RGB;
+ xcsc->cft_out = XVIDC_CSF_RGB;
+ xcsc->output_range = XVIDC_CR_0_255;
+ /* Derived from the color depth to support 10, 12 and 16 bit depths */
+ xcsc->clip_max = BIT(xcsc->color_depth) - 1;
+ xcsc_set_control_defaults(xcsc);
+ xcsc_set_unity_matrix(xcsc);
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+ xcsc_write_coeff(xcsc);
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
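The clip ceiling tracks the configured color depth; for example:

    s32 clip8  = BIT(8) - 1;   /* 255 for 8 bpc */
    s32 clip10 = BIT(10) - 1;  /* 1023 for 10 bpc */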
+static void
+xcsc_ycrcb_to_rgb(struct xcsc_dev *xcsc, s32 *clip_max,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ u16 bpc_scale = BIT(xcsc->color_depth - 8);
+
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+ * based on this matrix-style algorithm, and the
+ * 'magic' numbers here come from that algorithm.
+ *
+ * XV_CSC_DIVISOR scales the floating-point constants
+ * to integers for the multiplicative operations.
+ *
+ * The coefficients are valid only for BT.709.
+ */
+ dev_dbg(xcsc->xvip.dev, "Performing YCrCb to RGB BT 709");
+ xcsc_copy_coeff(temp, ycrcb_to_rgb_unity);
+ temp[0][3] = -248 * bpc_scale;
+ temp[1][3] = 77 * bpc_scale;
+ temp[2][3] = -289 * bpc_scale;
+ *clip_max = BIT(xcsc->color_depth) - 1;
+}
+
+static void
+xcsc_matrix_multiply(s32 K1[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 K2[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 kout[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ s32 A, B, C, D, E, F, G, H, I, J, K, L, M, N;
+ s32 O, P, Q, R, S, T, U, V, W, X;
+
+ A = K1[0][0]; B = K1[0][1]; C = K1[0][2]; J = K1[0][3];
+ D = K1[1][0]; E = K1[1][1]; F = K1[1][2]; K = K1[1][3];
+ G = K1[2][0]; H = K1[2][1]; I = K1[2][2]; L = K1[2][3];
+
+ M = K2[0][0]; N = K2[0][1]; O = K2[0][2]; V = K2[0][3];
+ P = K2[1][0]; Q = K2[1][1]; R = K2[1][2]; W = K2[1][3];
+ S = K2[2][0]; T = K2[2][1]; U = K2[2][2]; X = K2[2][3];
+
+ kout[0][0] = (M * A + N * D + O * G) / XV_CSC_SCALE_FACTOR;
+ kout[0][1] = (M * B + N * E + O * H) / XV_CSC_SCALE_FACTOR;
+ kout[0][2] = (M * C + N * F + O * I) / XV_CSC_SCALE_FACTOR;
+ kout[1][0] = (P * A + Q * D + R * G) / XV_CSC_SCALE_FACTOR;
+ kout[1][1] = (P * B + Q * E + R * H) / XV_CSC_SCALE_FACTOR;
+ kout[1][2] = (P * C + Q * F + R * I) / XV_CSC_SCALE_FACTOR;
+ kout[2][0] = (S * A + T * D + U * G) / XV_CSC_SCALE_FACTOR;
+ kout[2][1] = (S * B + T * E + U * H) / XV_CSC_SCALE_FACTOR;
+ kout[2][2] = (S * C + T * F + U * I) / XV_CSC_SCALE_FACTOR;
+ kout[0][3] = ((M * J + N * K + O * L) / XV_CSC_SCALE_FACTOR) + V;
+ kout[1][3] = ((P * J + Q * K + R * L) / XV_CSC_SCALE_FACTOR) + W;
+ kout[2][3] = ((S * J + T * K + U * L) / XV_CSC_SCALE_FACTOR) + X;
+}
+
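The coefficients are Q12 fixed point (XV_CSC_FRACTIONAL_BITS = 12), which is why each product above is rescaled once by XV_CSC_SCALE_FACTOR. A worked example:

    /* BT.709 luma gain 1.164 in Q12: 11644 * 4096 / 10000 = 4769,
     * i.e. 4769 / 4096 ~= 1.1643. Squaring needs one rescale:
     */
    s32 k  = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;  /* 4769 */
    s32 k2 = k * k / XV_CSC_SCALE_FACTOR;                   /* 5552 ~= 1.355 */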
+static void
+xcsc_rgb_to_ycrcb(struct xcsc_dev *xcsc, s32 *clip_max,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ u16 bpc_scale = BIT(xcsc->color_depth - 8);
+
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+ * based on this matrix-style algorithm, and the
+ * 'magic' numbers here come from that algorithm.
+ *
+ * XV_CSC_DIVISOR scales the floating-point constants
+ * to integers for the multiplicative operations.
+ *
+ * The coefficients are valid only for BT.709.
+ */
+ dev_dbg(xcsc->xvip.dev, "Performing RGB to YCrCb BT 709");
+ xcsc_copy_coeff(temp, rgb_to_ycrcb_unity);
+ temp[0][3] = 16 * bpc_scale;
+ temp[1][3] = 128 * bpc_scale;
+ temp[2][3] = 128 * bpc_scale;
+ *clip_max = BIT(xcsc->color_depth) - 1;
+}
+
+static int xcsc_update_formats(struct xcsc_dev *xcsc)
+{
+ u32 color_in, color_out;
+
+ /* Write In and Out Video Formats */
+ color_in = xcsc->formats[XVIP_PAD_SINK].code;
+ color_out = xcsc->formats[XVIP_PAD_SOURCE].code;
+
+ switch (color_in) {
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : RGB");
+ xcsc->cft_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 444");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 422");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 420");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_420;
+ break;
+ }
+
+ switch (color_out) {
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ xcsc->cft_out = XVIDC_CSF_RGB;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : RGB");
+ if (color_in != MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_444;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 444");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_422;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 422");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_420;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 420");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ }
+
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+
+ xcsc_write_coeff(xcsc);
+
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+#ifdef DEBUG
+ xcsc_print_k_hw(xcsc);
+ xcsc_print_coeff(xcsc);
+#endif
+ return 0;
+}
+
+static inline struct xcsc_dev *to_csc(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcsc_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+__xcsc_get_pad_format(struct xcsc_dev *xcsc,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcsc->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcsc->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static void
+xcsc_correct_coeff(struct xcsc_dev *xcsc,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ s32 csc_change[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = { {0} };
+ s32 csc_extra[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = { {0} };
+ u32 mbus_in = xcsc->formats[XVIP_PAD_SINK].code;
+ u32 mbus_out = xcsc->formats[XVIP_PAD_SOURCE].code;
+
+#ifdef DEBUG
+ xcsc_log_coeff(xcsc->xvip.dev, temp);
+#endif
+ if (mbus_in == MEDIA_BUS_FMT_RBG888_1X24 && mbus_out == mbus_in) {
+ dev_dbg(xcsc->xvip.dev, "%s : RGB to RGB", __func__);
+ xcsc_copy_coeff(xcsc->k_hw,
+ (const s32 (*)[XV_CSC_K_MAX_COLUMNS + 1])temp);
+ } else if (mbus_in == MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : RGB to YUV", __func__);
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(temp, csc_change, xcsc->k_hw);
+ } else if (mbus_in != MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out == MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : YUV to RGB", __func__);
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_change, temp, xcsc->k_hw);
+ } else if (mbus_in != MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : YUV to YUV", __func__);
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_change, temp, csc_extra);
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_extra, csc_change, xcsc->k_hw);
+ } else {
+ /* Should never get here */
+ WARN_ON(1);
+ }
+}
+
+static void xcsc_set_brightness(struct xcsc_dev *xcsc)
+{
+ unsigned int i, j;
+
+ dev_dbg(xcsc->xvip.dev,
+ "%s : Brightness %d Brightness Active %d",
+ __func__,
+ ((xcsc->brightness - 20) / 2),
+ ((xcsc->brightness_active - 20) / 2));
+ if (xcsc->brightness == xcsc->brightness_active)
+ return;
+ for (i = 0; i < XV_CSC_K_MAX_ROWS; i++) {
+ for (j = 0; j < XV_CSC_K_MAX_COLUMNS; j++) {
+ xcsc->shadow_coeff[i][j] = (xcsc->shadow_coeff[i][j] *
+ xcsc->brightness) /
+ xcsc->brightness_active;
+ }
+ }
+ xcsc->brightness_active = xcsc->brightness;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+}
+
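xcsc_set_brightness() rescales every shadow coefficient by the ratio of the new to the previously active brightness. A worked example:

    /* Slider moved from the default 50 (active = 120) to 70
     * (brightness = 2 * 70 + 20 = 160): each Q12 coefficient is
     * scaled by 160/120, so unity 4096 becomes 5461.
     */
    s32 k = 4096 * 160 / 120;  /* = 5461 */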
+static void xcsc_set_contrast(struct xcsc_dev *xcsc)
+{
+ s32 contrast;
+ u8 scale = BIT(xcsc->color_depth - 8);
+
+ contrast = xcsc->contrast - xcsc->contrast_active;
+ dev_dbg(xcsc->xvip.dev,
+ "%s : Contrast Difference %d scale = %d",
+ __func__, contrast, scale);
+ /* Avoid updates if same */
+ if (!contrast)
+ return;
+ /* Update RGB Offsets */
+ xcsc->shadow_coeff[0][3] += contrast * scale;
+ xcsc->shadow_coeff[1][3] += contrast * scale;
+ xcsc->shadow_coeff[2][3] += contrast * scale;
+ xcsc->contrast_active = xcsc->contrast;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+}
+
+static void xcsc_set_red_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Red Gain %d Red Gain Active %d", __func__,
+ (xcsc->red_gain - 20) / 2,
+ (xcsc->red_gain_active - 20) / 2);
+
+ if (xcsc->red_gain != xcsc->red_gain_active) {
+ xcsc->shadow_coeff[0][0] = (xcsc->shadow_coeff[0][0] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->shadow_coeff[0][1] = (xcsc->shadow_coeff[0][1] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->shadow_coeff[0][2] = (xcsc->shadow_coeff[0][2] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->red_gain_active = xcsc->red_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_green_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Green Gain %d Green Gain Active %d", __func__,
+ (xcsc->green_gain - 20) / 2,
+ (xcsc->green_gain_active - 20) / 2);
+
+ if (xcsc->green_gain != xcsc->green_gain_active) {
+ xcsc->shadow_coeff[1][0] = (xcsc->shadow_coeff[1][0] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->shadow_coeff[1][1] = (xcsc->shadow_coeff[1][1] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->shadow_coeff[1][2] = (xcsc->shadow_coeff[1][2] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->green_gain_active = xcsc->green_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_blue_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Blue Gain %d Blue Gain Active %d", __func__,
+ (xcsc->blue_gain - 20) / 2,
+ (xcsc->blue_gain_active - 20) / 2);
+
+ if (xcsc->blue_gain != xcsc->blue_gain_active) {
+ xcsc->shadow_coeff[2][0] = (xcsc->shadow_coeff[2][0] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->shadow_coeff[2][1] = (xcsc->shadow_coeff[2][1] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->shadow_coeff[2][2] = (xcsc->shadow_coeff[2][2] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->blue_gain_active = xcsc->blue_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_size(struct xcsc_dev *xcsc)
+{
+ u32 width, height;
+
+ width = xcsc->formats[XVIP_PAD_SINK].width;
+ height = xcsc->formats[XVIP_PAD_SINK].height;
+ dev_dbg(xcsc->xvip.dev, "%s : Setting width %d and height %d",
+ __func__, width, height);
+ xcsc_write(xcsc, XV_CSC_WIDTH, width);
+ xcsc_write(xcsc, XV_CSC_HEIGHT, height);
+}
+
+static int xcsc_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+
+ dev_dbg(xcsc->xvip.dev, "%s : Stream %s", __func__,
+ enable ? "On" : "Off");
+ if (!enable) {
+ /* Toggle the global IP reset through the PS GPIO */
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_DEASSERT);
+ return 0;
+ }
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+ xcsc_set_size(xcsc);
+ xcsc_write_coeff(xcsc);
+#ifdef DEBUG
+ xcsc_print_coeff(xcsc);
+ dev_dbg(xcsc->xvip.dev, "cft_in = %d cft_out = %d",
+ xcsc_read(xcsc, XV_CSC_INVIDEOFORMAT),
+ xcsc_read(xcsc, XV_CSC_OUTVIDEOFORMAT));
+ dev_dbg(xcsc->xvip.dev, "clipmax = %d clampmin = %d",
+ xcsc_read(xcsc, XV_CSC_CLIPMAX),
+ xcsc_read(xcsc, XV_CSC_CLAMPMIN));
+ dev_dbg(xcsc->xvip.dev, "height = %d width = %d",
+ xcsc_read(xcsc, XV_CSC_HEIGHT),
+ xcsc_read(xcsc, XV_CSC_WIDTH));
+#endif
+ /* Start VPSS CSC IP */
+ xcsc_write(xcsc, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xcsc_video_ops = {
+ .s_stream = xcsc_s_stream,
+};
+
+static int xcsc_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+
+ fmt->format = *__xcsc_get_pad_format(xcsc, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xcsc_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_mbus_framefmt *__propagate;
+
+ __format = __xcsc_get_pad_format(xcsc, cfg, fmt->pad, fmt->which);
+ /* Propagate to Source Pad */
+ __propagate = __xcsc_get_pad_format(xcsc, cfg,
+ XVIP_PAD_SOURCE, fmt->which);
+ *__format = fmt->format;
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XV_CSC_MIN_WIDTH, xcsc->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XV_CSC_MIN_HEIGHT, xcsc->max_height);
+
+ switch (__format->code) {
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ break;
+ default:
+ /* Unsupported Format. Default to RGB */
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ return -EINVAL;
+ }
+
+ /* Always propagate Sink image size to Source */
+ __propagate->width = __format->width;
+ __propagate->height = __format->height;
+
+ fmt->format = *__format;
+ xcsc_update_formats(xcsc);
+ xcsc_set_control_defaults(xcsc);
+ xcsc_set_v4l2_ctrl_defaults(xcsc);
+ dev_info(xcsc->xvip.dev, "VPSS CSC color controls reset to defaults");
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops xcsc_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcsc_get_format,
+ .set_fmt = xcsc_set_format,
+};
+
+static const struct v4l2_subdev_ops xcsc_ops = {
+ .video = &xcsc_video_ops,
+ .pad = &xcsc_pad_ops
+};
+
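+/*
+ * V4L2 control values (0-100) are mapped to the IP's internal ranges
+ * below before the coefficient update helpers run. Brightness and the
+ * RGB gains use (2 * val) + 20, giving 20-220, which is always non-zero
+ * so the new_gain / active_gain rescaling can never divide by zero.
+ * Contrast uses (4 * val) - 200, a signed offset in -200 to 200. For
+ * example, a brightness control value of 50 maps to (2 * 50) + 20 = 120.
+ */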
+static int xcsc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xcsc_dev *xcsc = container_of(ctrl->handler,
+ struct xcsc_dev,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_CSC_BRIGHTNESS:
+ xcsc->brightness = (2 * ctrl->val) + 20;
+ xcsc_set_brightness(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_CONTRAST:
+ xcsc->contrast = (4 * ctrl->val) - 200;
+ xcsc_set_contrast(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_RED_GAIN:
+ xcsc->red_gain = (2 * ctrl->val) + 20;
+ xcsc_set_red_gain(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_BLUE_GAIN:
+ xcsc->blue_gain = (2 * ctrl->val) + 20;
+ xcsc_set_blue_gain(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_GREEN_GAIN:
+ xcsc->green_gain = (2 * ctrl->val) + 20;
+ xcsc_set_green_gain(xcsc);
+ break;
+ }
+#ifdef DEBUG
+ xcsc_print_k_hw(xcsc);
+ xcsc_print_coeff(xcsc);
+#endif
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xcsc_ctrl_ops = {
+ .s_ctrl = xcsc_s_ctrl,
+};
+
+static struct v4l2_ctrl_config xcsc_color_ctrls[XCSC_COLOR_CTRL_COUNT] = {
+ /* Brightness */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_BRIGHTNESS,
+ .name = "CSC Brightness",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Contrast */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_CONTRAST,
+ .name = "CSC Contrast",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Red Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_RED_GAIN,
+ .name = "CSC Red Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Blue Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_BLUE_GAIN,
+ .name = "CSC Blue Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Green Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_GREEN_GAIN,
+ .name = "CSC Green Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
+
+static int xcsc_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcsc->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcsc->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcsc_close(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xcsc_internal_ops = {
+ .open = xcsc_open,
+ .close = xcsc_close,
+};
+
+static const struct media_entity_operations xcsc_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xcsc_parse_of(struct xcsc_dev *xcsc)
+{
+ struct device *dev = xcsc->xvip.dev;
+ struct device_node *node = xcsc->xvip.dev->of_node;
+ const struct xvip_video_format *vip_format;
+ struct device_node *ports, *port;
+ int rval;
+ u32 port_id = 0;
+ u32 video_width[2] = { 0 };
+
+ rval = of_property_read_u32(node, "xlnx,max-height", &xcsc->max_height);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xcsc->max_height > XV_CSC_MAX_HEIGHT ||
+ xcsc->max_height < XV_CSC_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width", &xcsc->max_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xcsc->max_width > XV_CSC_MAX_WIDTH ||
+ xcsc->max_width < XV_CSC_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "Invalid media pad format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT to specify pad");
+ return rval;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ xcsc->vip_formats[port_id] = vip_format;
+
+ rval = of_property_read_u32(port, "xlnx,video-width",
+ &video_width[port_id]);
+ if (rval < 0) {
+ dev_err(dev,
+ "DT Port%d xlnx,video-width not found",
+ port_id);
+ return rval;
+ }
+ }
+ }
+ if (video_width[0] != video_width[1]) {
+ dev_err(dev, "Changing video width in DT not supported");
+ return -EINVAL;
+ }
+ switch (video_width[0]) {
+ case XVIDC_BPC_8:
+ case XVIDC_BPC_10:
+ xcsc->color_depth = video_width[0];
+ break;
+ default:
+ dev_err(dev, "Unsupported color depth %d", video_width[0]);
+ return -EINVAL;
+ }
+ /* Reset GPIO */
+ xcsc->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xcsc->rst_gpio)) {
+ if (PTR_ERR(xcsc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xcsc->rst_gpio);
+ }
+ return 0;
+}
+
+static int xcsc_probe(struct platform_device *pdev)
+{
+ struct xcsc_dev *xcsc;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval, itr;
+
+ xcsc = devm_kzalloc(&pdev->dev, sizeof(*xcsc), GFP_KERNEL);
+ if (!xcsc)
+ return -ENOMEM;
+
+ xcsc->xvip.dev = &pdev->dev;
+
+ rval = xcsc_parse_of(xcsc);
+ if (rval < 0)
+ return rval;
+
+ /* Reset and initialize the core */
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_DEASSERT);
+ rval = xvip_init_resources(&xcsc->xvip);
+ if (rval < 0)
+ return rval;
+
+ /* Init v4l2 subdev */
+ subdev = &xcsc->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcsc_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcsc_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcsc);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ xcsc_set_default_state(xcsc);
+ def_fmt = &xcsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->code = xcsc->vip_formats[XVIP_PAD_SINK]->code;
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_REC709;
+ def_fmt->width = XV_CSC_DEFAULT_WIDTH;
+ def_fmt->height = XV_CSC_DEFAULT_HEIGHT;
+ xcsc->formats[XVIP_PAD_SINK] = *def_fmt;
+ /* Source supports only YUV 444, YUV 422, and RGB */
+ def_fmt = &xcsc->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xcsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->code = xcsc->vip_formats[XVIP_PAD_SOURCE]->code;
+ def_fmt->width = XV_CSC_DEFAULT_WIDTH;
+ def_fmt->height = XV_CSC_DEFAULT_HEIGHT;
+ xcsc->formats[XVIP_PAD_SOURCE] = *def_fmt;
+ xcsc->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcsc->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xcsc_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xcsc->pads);
+ if (rval < 0)
+ goto media_error;
+ /* V4L2 Control Setup */
+ v4l2_ctrl_handler_init(&xcsc->ctrl_handler,
+ ARRAY_SIZE(xcsc_color_ctrls));
+ for (itr = 0; itr < ARRAY_SIZE(xcsc_color_ctrls); itr++) {
+ xcsc->custom_ctrls[itr] =
+ v4l2_ctrl_new_custom(&xcsc->ctrl_handler,
+ &xcsc_color_ctrls[itr], NULL);
+ }
+ if (xcsc->ctrl_handler.error) {
+ dev_err(&pdev->dev, "Failed to add v4l2 controls");
+ rval = xcsc->ctrl_handler.error;
+ goto ctrl_error;
+ }
+ subdev->ctrl_handler = &xcsc->ctrl_handler;
+ rval = v4l2_ctrl_handler_setup(&xcsc->ctrl_handler);
+ if (rval < 0) {
+ dev_err(xcsc->xvip.dev, "Failed to setup control handler");
+ goto ctrl_error;
+ }
+ platform_set_drvdata(pdev, xcsc);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto ctrl_error;
+ }
+ dev_info(&pdev->dev, "VPSS CSC %d-bit Color Depth Probe Successful",
+ xcsc->color_depth);
+ return 0;
+ctrl_error:
+ v4l2_ctrl_handler_free(&xcsc->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xcsc->xvip);
+ return rval;
+}
+
+static int xcsc_remove(struct platform_device *pdev)
+{
+ struct xcsc_dev *xcsc = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcsc->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcsc->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcsc->xvip);
+ return 0;
+}
+
+static const struct of_device_id xcsc_of_id_table[] = {
+ {.compatible = "xlnx,v-vpss-csc"},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xcsc_of_id_table);
+
+static struct platform_driver xcsc_driver = {
+ .driver = {
+ .name = "xilinx-vpss-csc",
+ .of_match_table = xcsc_of_id_table,
+ },
+ .probe = xcsc_probe,
+ .remove = xcsc_remove,
+};
+
+module_platform_driver(xcsc_driver);
+MODULE_DESCRIPTION("Xilinx VPSS CSC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vpss-scaler.c b/drivers/media/platform/xilinx/xilinx-vpss-scaler.c
new file mode 100644
index 000000000000..e07d7eaad259
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vpss-scaler.c
@@ -0,0 +1,1878 @@
+/*
+ * Xilinx VPSS Scaler
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MAX_WIDTH (8192)
+#define XSCALER_MIN_HEIGHT (64)
+#define XSCALER_MAX_HEIGHT (4320)
+#define XSCALER_MAX_PHASES (64)
+
+/* Default values used in case the application does not configure the IP */
+#define XSCALER_DEF_IN_HEIGHT (720)
+#define XSCALER_DEF_IN_WIDTH (1280)
+#define XSCALER_DEF_OUT_HEIGHT (1080)
+#define XSCALER_DEF_OUT_WIDTH (1920)
+
+#define XSCALER_HSF (0x0100)
+#define XSCALER_VSF (0x0104)
+#define XSCALER_SF_SHIFT (20)
+#define XSCALER_SF_MASK (0xffffff)
+#define XSCALER_SOURCE_SIZE (0x0108)
+#define XSCALER_SIZE_HORZ_SHIFT (0)
+#define XSCALER_SIZE_VERT_SHIFT (16)
+#define XSCALER_SIZE_MASK (0xfff)
+#define XSCALER_HAPERTURE (0x010c)
+#define XSCALER_VAPERTURE (0x0110)
+#define XSCALER_APERTURE_START_SHIFT (0)
+#define XSCALER_APERTURE_END_SHIFT (16)
+#define XSCALER_OUTPUT_SIZE (0x0114)
+#define XSCALER_COEF_DATA_IN (0x0134)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Video subsystem block offsets */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_IP_AXIMM BIT(0)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_TRI_OFFSET (0x4)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_GIE_OFFSET (0x11c)
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+#define STEP_PRECISION (65536)
+
+/* Video IP Formats */
+enum xscaler_vid_reg_fmts {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/* Video IP PPC */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for the low and high 16 bits of a 32-bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+
+/* Mask definitions for the low and high 16 bits of a 32-bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* Scaler reset macros */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Scaler AP Control Registers */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
+
+/* H-scaler registers */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA (0x0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xhsc_coeff_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xhsc_coeff_taps8[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xhsc_coeff_taps10[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xhsc_coeff_taps12[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const u16
+xvsc_coeff_taps6[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const u16
+xvsc_coeff_taps8[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const u16
+xvsc_coeff_taps10[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const u16
+xvsc_coeff_taps12[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+#define XV_VSCALER_CTRL_WIDTH_HWREG_VFLTCOEFF (16)
+#define XV_VSCALER_CTRL_DEPTH_HWREG_VFLTCOEFF (384)
+
+#define XSCALER_CLK_PROP BIT(0)
+
+/**
+ * struct xscaler_feature - DT or IP property structure
+ * @flags: Bitmask of properties enabled in the IP or DT
+ */
+struct xscaler_feature {
+ u32 flags;
+};
+
+/**
+ * struct xscaler_device - Xilinx Scaler device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: Scaler sub-device media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats retrieved from the DT
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @pix_per_clk: Pixels per Clock cycle the IP operates upon
+ * @max_pixels: The maximum number of pixels that the H-scaler examines
+ * @max_lines: The maximum number of lines that the V-scaler examines
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @hscaler_coeff: The complete array of H-scaler coefficients
+ * @vscaler_coeff: The complete array of V-scaler coefficients
+ * @is_polyphase: Track if scaling algorithm is polyphase or not
+ * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ * @cfg: Pointer to scaler config structure
+ * @aclk_axis: AXI4-Stream video interface clock
+ * @aclk_ctrl: AXI4-Lite control interface clock
+ */
+struct xscaler_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ u32 pix_per_clk;
+ u32 max_pixels;
+ u32 max_lines;
+ u32 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
+ short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
+ short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
+ bool is_polyphase;
+
+ struct gpio_desc *rst_gpio;
+ const struct xscaler_feature *cfg;
+ struct clk *aclk_axis;
+ struct clk *aclk_ctrl;
+};
+
+static const struct xscaler_feature xlnx_scaler_v1_0 = {
+ .flags = XSCALER_CLK_PROP,
+};
+
+static const struct xscaler_feature xlnx_scaler = {
+ .flags = 0,
+};
+
+static const struct of_device_id xscaler_of_id_table[] = {
+ { .compatible = "xlnx,v-vpss-scaler",
+ .data = &xlnx_scaler},
+ { .compatible = "xlnx,v-vpss-scaler-1.0",
+ .data = &xlnx_scaler_v1_0},
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xscaler_of_id_table);
+
+static inline struct xscaler_device *to_scaler(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscaler_device, xvip.subdev);
+}
+
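+/*
+ * xv_hscaler_calculate_phases() below walks the scan line in 16.16
+ * fixed-point steps. @pixel_rate is assumed to be the scaling step in
+ * that format (suggested by XHSC_STEP_PRECISION_SHIFT; it is computed
+ * by the caller). Whenever the accumulated offset carries into its
+ * integer part a new input sample is fetched; while the integer part
+ * is zero, output samples are produced. With the default 64 phases,
+ * shift = 16 - ilog2(64) = 10, so the phase index is
+ * (offset >> 10) & 63.
+ */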
+static void
+xv_hscaler_calculate_phases(struct xscaler_device *xscaler,
+ u32 width_in, u32 width_out, u32 pixel_rate)
+{
+ unsigned int loop_width;
+ unsigned int x, s;
+ int offset = 0;
+ int xwrite_pos = 0;
+ bool output_write_en;
+ bool get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+ int nr_rds = 0;
+ int nr_rds_clck;
+ unsigned int nphases = xscaler->max_num_phases;
+ unsigned int nppc = xscaler->pix_per_clk;
+ unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+ /* Clear stale phase data left over from a previous configuration */
+ memset(xscaler->H_phases, 0, sizeof(xscaler->H_phases));
+
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+ /* read a new input sample */
+ get_new_pix = true;
+ offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+ (xwrite_pos < width_out)) {
+ /* produce a new output sample */
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ /* Needs updates for 4 PPC */
+ xscaler->H_phases[x] |= (phaseH <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ xscaler->H_phases[x] |= (array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MULTIPLIER)));
+ if (output_write_en) {
+ xscaler->H_phases[x] |=
+ (XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
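+/*
+ * Illustrative padding example for the loader below: programming 6
+ * effective taps into the 12-tap core gives pad = 12 - 6 = 6 and
+ * offset = 3, so each phase's six coefficients are centered in slots
+ * 3-8 while slots 0-2 and 9-11 are zero-filled.
+ */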
+static void
+xv_hscaler_load_ext_coeff(struct xscaler_device *xscaler,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ u32 nphases = xscaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_HSCALER_MAX_H_TAPS - ntaps;
+ offset = pad >> 1;
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Pad = %d Offset = %d Nphases = %d ntaps = %d",
+ __func__, pad, offset, nphases, ntaps);
+
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xscaler->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) { /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ xscaler->hscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_HSCALER_MAX_H_TAPS; j++)
+ xscaler->hscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/**
+ * xv_hscaler_select_coeff - Select the H-scaler coefficients of operation
+ * @xscaler: VPSS Scaler device information
+ * @width_in: Width of input video
+ * @width_out: Width of desired output video
+ *
+ * There are instances when an N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example:
+ * Depending on the scaling ratio (while downscaling), a 12-tap
+ * filter may operate with 10-tap coefficients and zero-pad the
+ * remaining coefficients. Scaling 3840 pixels down to 1920 gives a
+ * ratio of (3840 * 10) / 1920 = 20, so a 12-tap core is programmed
+ * with the 8-tap coefficient set.
+ *
+ * While upscaling, the driver will program 6-tap filter coefficients
+ * in any N-tap configuration (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal video
+ * output, as determined by repeated testing of the IP.
+ *
+ * Return: 0 on success, or -EINVAL for an unsupported number of
+ * H-scaler taps.
+ */
+static int
+xv_hscaler_select_coeff(struct xscaler_device *xscaler,
+ u32 width_in, u32 width_out)
+{
+ const short *coeff;
+ u16 hscale_ratio;
+ u32 ntaps = xscaler->num_hori_taps;
+
+ /*
+ * Scale-down mode uses dynamic filter selection logic.
+ * Scale-up mode (including 1:1) always uses the 6-tap filter.
+ */
+ if (width_out < width_in) {
+ hscale_ratio = ((width_in * 10) / width_out);
+
+ switch (xscaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_6:
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ break;
+ case XV_HSCALER_TAPS_8:
+ if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_10:
+ if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_12:
+ if (hscale_ratio > 35) {
+ coeff = &xhsc_coeff_taps12[0][0];
+ ntaps = XV_HSCALER_TAPS_12;
+ } else if (hscale_ratio > 25) {
+ coeff = &xhsc_coeff_taps10[0][0];
+ ntaps = XV_HSCALER_TAPS_10;
+ } else if (hscale_ratio > 15) {
+ coeff = &xhsc_coeff_taps8[0][0];
+ ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Unsupported H-scaler number of taps = %d",
+ xscaler->num_hori_taps);
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(xscaler->xvip.dev, "H-scaler : scale up 6 tap");
+ coeff = &xhsc_coeff_taps6[0][0];
+ ntaps = XV_HSCALER_TAPS_6;
+ }
+ xv_hscaler_load_ext_coeff(xscaler, coeff, ntaps);
+ return 0;
+}
+
+static void xv_hscaler_set_coeff(struct xscaler_device *xscaler)
+{
+ int val, i, j, offset, rd_indx;
+ u32 ntaps = xscaler->num_hori_taps;
+ u32 nphases = xscaler->max_num_phases;
+ u32 base_addr;
+
+ offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+ base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
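+ /*
+ * Pack two adjacent 16-bit coefficients into one 32-bit register
+ * write: the even tap fills the low half-word, the odd tap the
+ * high half-word.
+ */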
+ val = (xscaler->hscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (xscaler->hscaler_coeff[i][rd_indx] &
+ XHSC_MASK_LOW_16BITS);
+ xvip_write(&xscaler->xvip, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static void
+xv_vscaler_load_ext_coeff(struct xscaler_device *xscaler,
+ const short *coeff, u32 ntaps)
+{
+ int i, j, pad, offset;
+ u32 nphases = xscaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+ offset = pad >> 1;
+
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Pad = %d Offset = %d Nphases = %d ntaps = %d",
+ __func__, pad, offset, nphases, ntaps);
+
+ /* Load user-defined coefficients into the scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xscaler->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) { /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ xscaler->vscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_VSCALER_MAX_V_TAPS; j++)
+ xscaler->vscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+static void xv_vscaler_set_coeff(struct xscaler_device *xscaler)
+{
+ u32 nphases = xscaler->max_num_phases;
+ u32 ntaps = xscaler->num_vert_taps;
+ int val, i, j, offset, rd_indx;
+ u32 base_addr;
+
+ offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+ base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xscaler->vscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (xscaler->vscaler_coeff[i][rd_indx] &
+ XVSC_MASK_LOW_16BITS);
+ xvip_write(&xscaler->xvip,
+ base_addr + ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_select_coeff - Select the V-scaler coefficients of operation
+ * @xscaler: VPSS Scaler device information
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when an N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example:
+ * Depending on the scaling ratio (while downscaling), a 10-tap
+ * filter may operate with 6-tap coefficients and zero-pad the
+ * remaining coefficients.
+ *
+ * While upscaling, the driver will program 6-tap filter coefficients
+ * in any N-tap configuration (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal video
+ * output, as determined by repeated testing of the IP.
+ *
+ * Return: 0 on success, or -EINVAL for an unsupported number of
+ * V-scaler taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xscaler_device *xscaler,
+ u32 height_in, u32 height_out)
+{
+ const short *coeff;
+ u16 vscale_ratio;
+ u32 ntaps = xscaler->num_vert_taps;
+
+ /*
+ * Scale-down mode uses dynamic filter selection logic.
+ * Scale-up mode (including 1:1) always uses the 6-tap filter.
+ */
+
+ if (height_out < height_in) {
+ vscale_ratio = ((height_in * 10) / height_out);
+
+ switch (xscaler->num_vert_taps) {
+ case XV_VSCALER_TAPS_6:
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ break;
+ case XV_VSCALER_TAPS_8:
+ if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_10:
+ if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ case XV_VSCALER_TAPS_12:
+ if (vscale_ratio > 35) {
+ coeff = &xvsc_coeff_taps12[0][0];
+ ntaps = XV_VSCALER_TAPS_12;
+ } else if (vscale_ratio > 25) {
+ coeff = &xvsc_coeff_taps10[0][0];
+ ntaps = XV_VSCALER_TAPS_10;
+ } else if (vscale_ratio > 15) {
+ coeff = &xvsc_coeff_taps8[0][0];
+ ntaps = XV_VSCALER_TAPS_8;
+ } else {
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Unsupported V-scaler number of taps = %d",
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+ } else {
+ dev_dbg(xscaler->xvip.dev, "V-scaler : scale up 6 tap");
+ coeff = &xvsc_coeff_taps6[0][0];
+ ntaps = XV_VSCALER_TAPS_6;
+ }
+
+ xv_vscaler_load_ext_coeff(xscaler, coeff, ntaps);
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static inline void
+xv_procss_disable_block(struct xvip_device *xvip, u32 channel, u32 ip_block)
+{
+ xvip_clr(xvip, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF,
+ ip_block);
+}
+
+static inline void
+xv_procss_enable_block(struct xvip_device *xvip, u32 channel, u32 ip_block)
+{
+ xvip_set(xvip, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF,
+ ip_block);
+}
+
+static void xscaler_reset(struct xscaler_device *xscaler)
+{
+ xv_procss_disable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+ xv_procss_enable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+}
+
+static int
+xv_vscaler_setup_video_fmt(struct xscaler_device *xscaler, u32 code_in)
+{
+ u32 video_in;
+
+ switch (code_in) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 420");
+ video_in = XVIDC_CSF_YCRCB_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 422");
+ video_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 444");
+ video_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format RGB");
+ video_in = XVIDC_CSF_RGB;
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Vscaler Unsupported Input Media Format 0x%x",
+ code_in);
+ return -EINVAL;
+ }
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ video_in);
+ /*
+ * The V-Scaler upscales YUV 420 input to YUV 422 before
+ * the H-Scaler starts operation.
+ */
+ if (video_in == XVIDC_CSF_YCRCB_420)
+ return XVIDC_CSF_YCRCB_422;
+ return video_in;
+}
+
+static int xv_hscaler_setup_video_fmt(struct xscaler_device *xscaler,
+ u32 code_out, u32 vsc_out)
+{
+ u32 video_out;
+
+ switch (vsc_out) {
+ case XVIDC_CSF_YCRCB_422:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is YUV 422");
+ break;
+ case XVIDC_CSF_YCRCB_444:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is YUV 444");
+ break;
+ case XVIDC_CSF_RGB:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is RGB");
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Hscaler got unsupported format from Vscaler");
+ return -EINVAL;
+ }
+
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ vsc_out);
+
+ switch (code_out) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 420\n");
+ video_out = XVIDC_CSF_YCRCB_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 422\n");
+ video_out = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 444\n");
+ video_out = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format RGB\n");
+ video_out = XVIDC_CSF_RGB;
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Hscaler Unsupported Output Media Format 0x%x",
+ code_out);
+ return -EINVAL;
+ }
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+ video_out);
+ return 0;
+}
+
+static void
+xv_hscaler_set_phases(struct xscaler_device *xscaler)
+{
+ u32 loop_width;
+ u32 index, val;
+ u32 offset, i, lsb, msb;
+
+ loop_width = xscaler->max_pixels / xscaler->pix_per_clk;
+ offset = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+
+ switch (xscaler->pix_per_clk) {
+ case XSCALER_PPC_1:
+ /*
+ * phaseH is 64 bits wide, but only the lower 16 bits of each
+ * entry are valid. Form a 32-bit word from the 16-bit LSBs of
+ * two consecutive entries; one 32-bit write then loads two
+ * entries into the IP registers (i is the array location and
+ * index is the address offset).
+ */
+ index = 0;
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = xscaler->H_phases[i] & XHSC_MASK_LOW_16BITS;
+ msb = xscaler->H_phases[i + 1] & XHSC_MASK_LOW_16BITS;
+ val = (msb << 16 | lsb);
+ xvip_write(&xscaler->xvip, offset + (index * 4), val);
+ ++index;
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 1 PPC design", __func__);
+ return;
+ case XSCALER_PPC_2:
+ /*
+ * phaseH is 64 bits wide, but only the lower 32 bits of each
+ * entry are valid. One 32-bit write loads each entry into the
+ * IP registers.
+ */
+ for (i = 0; i < loop_width; i++) {
+ val = (xscaler->H_phases[i] &
+ XHSC_MASK_LOW_32BITS);
+ xvip_write(&xscaler->xvip, offset + (i * 4), val);
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 2 PPC design", __func__);
+ return;
+ }
+}
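+
+/*
+ * Packing example for the 1 PPC case (hypothetical phase values): with
+ * H_phases[0] = 0x0012 and H_phases[1] = 0x0034, the 16-bit LSBs are
+ * combined as val = (0x0034 << 16) | 0x0012 = 0x00340012, so a single
+ * 32-bit register write programs two phase entries.
+ */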
+
+static int xscaler_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ u32 width_in, width_out;
+ u32 height_in, height_out;
+ u32 code_in, code_out;
+ u32 pixel_rate;
+ u32 line_rate;
+ int ret;
+
+ if (!enable) {
+ dev_dbg(xscaler->xvip.dev, "%s: Stream Off", __func__);
+ /* Toggle the global IP reset through the PS GPIO */
+ gpiod_set_value_cansleep(xscaler->rst_gpio,
+ XSCALER_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscaler->rst_gpio,
+ XSCALER_RESET_DEASSERT);
+ xscaler_reset(xscaler);
+ memset(xscaler->H_phases, 0, sizeof(xscaler->H_phases));
+ return 0;
+ }
+
+ dev_dbg(xscaler->xvip.dev, "%s: Stream On", __func__);
+
+ /* Extract Sink Pad Information */
+ width_in = xscaler->formats[XVIP_PAD_SINK].width;
+ height_in = xscaler->formats[XVIP_PAD_SINK].height;
+ code_in = xscaler->formats[XVIP_PAD_SINK].code;
+
+ /* Extract Source Pad Information */
+ width_out = xscaler->formats[XVIP_PAD_SOURCE].width;
+ height_out = xscaler->formats[XVIP_PAD_SOURCE].height;
+ code_out = xscaler->formats[XVIP_PAD_SOURCE].code;
+
+ /*
+ * The V-Scaler is placed before the H-Scaler.
+ * V-Scaler setup:
+ */
+ line_rate = (height_in * STEP_PRECISION) / height_out;
+
+ if (xscaler->is_polyphase) {
+ ret = xv_vscaler_select_coeff(xscaler, height_in, height_out);
+ if (ret < 0)
+ return ret;
+ xv_vscaler_set_coeff(xscaler);
+ }
+
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height_in);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width_in);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height_out);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA, line_rate);
+ ret = xv_vscaler_setup_video_fmt(xscaler, code_in);
+ if (ret < 0)
+ return ret;
+
+ /* H-Scaler setup */
+ pixel_rate = (width_in * STEP_PRECISION) / width_out;
+
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height_out);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width_in);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width_out);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA, pixel_rate);
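+ /* ret still carries the color format propagated by the V-Scaler setup */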
+ ret = xv_hscaler_setup_video_fmt(xscaler, code_out, ret);
+ if (ret < 0)
+ return ret;
+
+ if (xscaler->is_polyphase) {
+ ret = xv_hscaler_select_coeff(xscaler, width_in, width_out);
+ if (ret < 0)
+ return ret;
+ xv_hscaler_set_coeff(xscaler);
+ }
+
+ xv_hscaler_calculate_phases(xscaler, width_in, width_out, pixel_rate);
+ xv_hscaler_set_phases(xscaler);
+
+ /* Start Scaler sub-cores */
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xv_procss_enable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_VIDEO_IN);
+ return 0;
+}
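+
+/*
+ * Rate programming example (hypothetical 4K to 1080p downscale, assuming
+ * STEP_PRECISION = 65536): line_rate = (2160 * 65536) / 1080 = 131072,
+ * i.e. two input lines are consumed per output line. The same fixed-point
+ * scheme applies to pixel_rate for the H-Scaler.
+ */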
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int xscaler_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ fse->min_width = XSCALER_MIN_WIDTH;
+ fse->max_width = xscaler->max_pixels;
+ fse->min_height = XSCALER_MIN_HEIGHT;
+ fse->max_height = xscaler->max_lines;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__xscaler_get_pad_format(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xscaler->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xscaler_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ fmt->format = *__xscaler_get_pad_format(xscaler, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xscaler_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+ *format = fmt->format;
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCALER_MIN_WIDTH, xscaler->max_pixels);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCALER_MIN_HEIGHT, xscaler->max_lines);
+ format->code = fmt->format.code;
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int
+xscaler_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xscaler->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xscaler->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int
+xscaler_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xscaler_video_ops = {
+ .s_stream = xscaler_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscaler_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xscaler_enum_frame_size,
+ .get_fmt = xscaler_get_format,
+ .set_fmt = xscaler_set_format,
+};
+
+static struct v4l2_subdev_ops xscaler_ops = {
+ .video = &xscaler_video_ops,
+ .pad = &xscaler_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscaler_internal_ops = {
+ .open = xscaler_open,
+ .close = xscaler_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscaler_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Platform Device Driver
+ */
+
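+/*
+ * A hypothetical device tree fragment illustrating only the properties
+ * parsed by xscaler_parse_of() below (clock and GPIO names follow the
+ * code; the compatible string and port contents are omitted):
+ *
+ * scaler {
+ * clock-names = "aclk_axis", "aclk_ctrl";
+ * xlnx,max-height = <2160>;
+ * xlnx,max-width = <3840>;
+ * xlnx,num-hori-taps = <8>;
+ * xlnx,num-vert-taps = <8>;
+ * xlnx,pix-per-clk = <2>;
+ * reset-gpios = <&gpio 0 1>;
+ * ports { ... };
+ * };
+ */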
+static int xscaler_parse_of(struct xscaler_device *xscaler)
+{
+ struct device *dev = xscaler->xvip.dev;
+ struct device_node *node = xscaler->xvip.dev->of_node;
+ const struct xvip_video_format *vip_format;
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+ u32 port_id, dt_ppc;
+
+ if (xscaler->cfg->flags & XSCALER_CLK_PROP) {
+ xscaler->aclk_axis = devm_clk_get(dev, "aclk_axis");
+ if (IS_ERR(xscaler->aclk_axis)) {
+ ret = PTR_ERR(xscaler->aclk_axis);
+ dev_err(dev, "failed to get aclk_axis (%d)\n", ret);
+ return ret;
+ }
+ xscaler->aclk_ctrl = devm_clk_get(dev, "aclk_ctrl");
+ if (IS_ERR(xscaler->aclk_ctrl)) {
+ ret = PTR_ERR(xscaler->aclk_ctrl);
+ dev_err(dev, "failed to get aclk_ctrl (%d)\n", ret);
+ return ret;
+ }
+ } else {
+ dev_info(dev, "assuming all required clocks are enabled!\n");
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xscaler->max_lines);
+ if (ret < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xscaler->max_lines > XSCALER_MAX_HEIGHT ||
+ xscaler->max_lines < XSCALER_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xscaler->max_pixels);
+ if (ret < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xscaler->max_pixels > XSCALER_MAX_WIDTH ||
+ xscaler->max_pixels < XSCALER_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "No reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ xscaler->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-hori-taps",
+ &xscaler->num_hori_taps);
+ if (ret < 0)
+ return ret;
+
+ switch (xscaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_HSCALER_TAPS_4:
+ xscaler->is_polyphase = false;
+ break;
+ case XV_HSCALER_TAPS_6:
+ case XV_HSCALER_TAPS_8:
+ case XV_HSCALER_TAPS_10:
+ case XV_HSCALER_TAPS_12:
+ xscaler->is_polyphase = true;
+ break;
+ default:
+ dev_err(dev, "Unsupported num-hori-taps %d",
+ xscaler->num_hori_taps);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-vert-taps",
+ &xscaler->num_vert_taps);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * For the bilinear and bicubic cases, the number of
+ * vertical and horizontal taps must match.
+ */
+ switch (xscaler->num_vert_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_VSCALER_TAPS_4:
+ if (xscaler->num_vert_taps != xscaler->num_hori_taps) {
+ dev_err(dev,
+ "H-scaler taps %d mismatches V-scaler taps %d",
+ xscaler->num_hori_taps,
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+ break;
+ case XV_VSCALER_TAPS_6:
+ case XV_VSCALER_TAPS_8:
+ case XV_VSCALER_TAPS_10:
+ case XV_VSCALER_TAPS_12:
+ xscaler->is_polyphase = true;
+ break;
+ default:
+ dev_err(dev, "Unsupported num-vert-taps %d",
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,pix-per-clk", &dt_ppc);
+ if (ret < 0)
+ return ret;
+
+ /* Driver only supports 1 PPC and 2 PPC */
+ if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2) {
+ dev_err(xscaler->xvip.dev,
+ "Unsupported xlnx,pix-per-clk (%d) value in DT", dt_ppc);
+ return -EINVAL;
+ }
+ xscaler->pix_per_clk = dt_ppc;
+
+ /* Reset GPIO */
+ xscaler->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xscaler->rst_gpio)) {
+ if (PTR_ERR(xscaler->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xscaler->rst_gpio);
+ }
+
+ return 0;
+}
+
+static int xscaler_probe(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+
+ xscaler = devm_kzalloc(&pdev->dev, sizeof(*xscaler), GFP_KERNEL);
+ if (!xscaler)
+ return -ENOMEM;
+
+ xscaler->xvip.dev = &pdev->dev;
+
+ match = of_match_node(xscaler_of_id_table, node);
+ if (!match)
+ return -ENODEV;
+
+ if (!strncmp(match->compatible, xscaler_of_id_table[0].compatible,
+ strlen(xscaler_of_id_table[0].compatible))) {
+ dev_warn(&pdev->dev,
+ "%s - compatible string is getting deprecated!\n",
+ match->compatible);
+ }
+
+ xscaler->cfg = match->data;
+
+ ret = xscaler_parse_of(xscaler);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize coefficient parameters */
+ xscaler->max_num_phases = XSCALER_MAX_PHASES;
+
+ if (xscaler->cfg->flags & XSCALER_CLK_PROP) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xscaler->xvip.iomem = devm_ioremap_resource(xscaler->xvip.dev,
+ res);
+ if (IS_ERR(xscaler->xvip.iomem))
+ return PTR_ERR(xscaler->xvip.iomem);
+
+ ret = clk_prepare_enable(xscaler->aclk_axis);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk_axis (%d)\n",
+ ret);
+ goto res_cleanup;
+ }
+
+ ret = clk_prepare_enable(xscaler->aclk_ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk_ctrl (%d)\n",
+ ret);
+ goto axis_clk_cleanup;
+ }
+ } else {
+ ret = xvip_init_resources(&xscaler->xvip);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Deassert the global IP reset through the PS GPIO */
+ gpiod_set_value_cansleep(xscaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ /* Reset the sub-cores through the IP-internal GPIO */
+ xscaler_reset(xscaler);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xscaler->xvip.subdev;
+ v4l2_subdev_init(subdev, &xscaler_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xscaler_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xscaler);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ default_format->width = XSCALER_DEF_IN_WIDTH;
+ default_format->height = XSCALER_DEF_IN_HEIGHT;
+ xscaler->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xscaler->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_formats[XVIP_PAD_SOURCE]->code;
+ default_format->width = XSCALER_DEF_OUT_WIDTH;
+ default_format->height = XSCALER_DEF_OUT_HEIGHT;
+ xscaler->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xscaler->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xscaler->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xscaler_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xscaler->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xscaler);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto error;
+ }
+ dev_info(xscaler->xvip.dev, "Num Hori Taps %d",
+ xscaler->num_hori_taps);
+ dev_info(xscaler->xvip.dev, "Num Vert Taps %d",
+ xscaler->num_vert_taps);
+ dev_info(&pdev->dev, "VPSS Scaler Probe Successful");
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xscaler->aclk_ctrl);
+axis_clk_cleanup:
+ clk_disable_unprepare(xscaler->aclk_axis);
+res_cleanup:
+ xvip_cleanup_resources(&xscaler->xvip);
+ return ret;
+}
+
+static int xscaler_remove(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xscaler->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xscaler->aclk_ctrl);
+ clk_disable_unprepare(xscaler->aclk_axis);
+ xvip_cleanup_resources(&xscaler->xvip);
+
+ return 0;
+}
+
+static struct platform_driver xscaler_driver = {
+ .driver = {
+ .name = "xilinx-vpss-scaler",
+ .of_match_table = xscaler_of_id_table,
+ },
+ .probe = xscaler_probe,
+ .remove = xscaler_remove,
+};
+
+module_platform_driver(xscaler_driver);
+MODULE_DESCRIPTION("Xilinx VPSS Scaler Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.c b/drivers/media/platform/xilinx/xilinx-vtc.c
index 0ae0208d7529..d69f48dfdac4 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.c
+++ b/drivers/media/platform/xilinx/xilinx-vtc.c
@@ -141,6 +141,9 @@
#define XVTC_GENERATOR_GLOBAL_DELAY 0x0104
+/* A value of 1 corresponds to 0.01% */
+#define XVTC_CLK_MAX_PCT_ERR 1
+
/**
* struct xvtc_device - Xilinx Video Timing Controller device structure
* @xvip: Xilinx Video IP device
@@ -175,10 +178,25 @@ int xvtc_generator_start(struct xvtc_device *xvtc,
const struct xvtc_config *config)
{
int ret;
+ unsigned long s_rate;
+ unsigned long g_rate;
+ unsigned long clk_err;
if (!xvtc->has_generator)
return -ENXIO;
+ s_rate = config->fps * config->hsize * config->vsize;
+ ret = clk_set_rate(xvtc->xvip.clk, s_rate);
+ if (ret < 0)
+ return ret;
+
+ /* Verify that the clock is within a reasonable tolerance. */
+ g_rate = clk_get_rate(xvtc->xvip.clk);
+ clk_err = (abs((long)(g_rate - s_rate)) * 10000) / s_rate;
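+ /*
+ * clk_err is in hundredths of a percent. Hypothetical example:
+ * a 1080p60 request of s_rate = 60 * 2200 * 1125 = 148500000 Hz
+ * answered with g_rate = 148351648 Hz gives
+ * (148352 * 10000) / 148500000 = 9, i.e. roughly 0.1%, which
+ * exceeds XVTC_CLK_MAX_PCT_ERR (0.01%) and triggers the warning.
+ */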
+ if (clk_err > XVTC_CLK_MAX_PCT_ERR)
+ dev_warn(xvtc->xvip.dev, "Failed to set clk rate: %lu, actual rate: %lu\n",
+ s_rate, g_rate);
+
ret = clk_prepare_enable(xvtc->xvip.clk);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 90cf44245283..c2c744b03426 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -27,6 +27,7 @@ struct xvtc_config {
unsigned int vsync_start;
unsigned int vsync_end;
unsigned int vsize;
+ unsigned int fps;
};
struct xvtc_device *xvtc_of_get(struct device_node *np);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index da72577c2998..caa1c48517ae 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -185,6 +185,18 @@ static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
static void uvc_stop_streaming(struct vb2_queue *vq)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
+
+ /* Prevent new buffers coming in. */
+ spin_lock_irq(&queue->irqlock);
+ queue->flags |= UVC_QUEUE_STOPPING;
+ spin_unlock_irq(&queue->irqlock);
+
+ /*
+ * All pending work must complete before the stream is disabled, as
+ * all URBs are freed during uvc_video_enable(s, 0).
+ */
+ flush_workqueue(stream->async_wq);
lockdep_assert_irqs_enabled();
@@ -193,6 +205,7 @@ static void uvc_stop_streaming(struct vb2_queue *vq)
spin_lock_irq(&queue->irqlock);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
+ queue->flags &= ~UVC_QUEUE_STOPPING;
spin_unlock_irq(&queue->irqlock);
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 8fa77a81dd7f..233d51a23939 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1097,6 +1097,20 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
+static void uvc_video_copy_packets(struct uvc_urb *uvc_urb)
+{
+ unsigned int i;
+
+ for (i = 0; i < uvc_urb->async_operations; i++) {
+ struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
+
+ memcpy(op->dst, op->src, op->len);
+
+ /* Release reference taken on this buffer. */
+ uvc_queue_buffer_release(op->buf);
+ }
+}
+
/*
* uvc_video_decode_data_work: Asynchronous memcpy processing
*
@@ -1106,22 +1120,26 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
static void uvc_video_copy_data_work(struct work_struct *work)
{
struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work);
- unsigned int i;
+ struct uvc_streaming *stream = uvc_urb->stream;
+ struct uvc_video_queue *queue = &stream->queue;
int ret;
- for (i = 0; i < uvc_urb->async_operations; i++) {
- struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
-
- memcpy(op->dst, op->src, op->len);
+ uvc_video_copy_packets(uvc_urb);
- /* Release reference taken on this buffer. */
- uvc_queue_buffer_release(op->buf);
+ /*
+ * Prevent resubmitting URBs when shutting down to ensure that no new
+ * work item will be scheduled after uvc_stop_streaming() flushes the
+ * work queue.
+ */
+ spin_lock_irq(&queue->irqlock);
+ if (!(queue->flags & UVC_QUEUE_STOPPING)) {
+ ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ if (ret < 0)
+ uvc_printk(KERN_ERR,
+ "Failed to resubmit video URB (%d).\n",
+ ret);
}
-
- ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
- if (ret < 0)
- uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
- ret);
+ spin_unlock_irq(&queue->irqlock);
}
static void uvc_video_decode_data(struct uvc_urb *uvc_urb,
@@ -1524,6 +1542,10 @@ static void uvc_video_complete(struct urb *urb)
buf = uvc_queue_get_current_buffer(queue);
+ /*
+ * Process the URB headers, and optionally queue expensive memcpy tasks
+ * to be deferred to a work queue.
+ */
if (vb2_qmeta) {
spin_lock_irqsave(&qmeta->irqlock, flags);
if (!list_empty(&qmeta->irqqueue))
@@ -1541,7 +1563,7 @@ static void uvc_video_complete(struct urb *urb)
*/
stream->decode(uvc_urb, buf, buf_meta);
- /* If no async work is needed, resubmit the URB immediately. */
+ /* If no async work was queued, the URB must be resubmitted here. */
if (!uvc_urb->async_operations) {
ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
if (ret < 0)
@@ -1551,7 +1573,21 @@ static void uvc_video_complete(struct urb *urb)
return;
}
- queue_work(stream->async_wq, &uvc_urb->work);
+ /*
+ * When the stream is stopped, all URBs are freed as part of the call to
+ * uvc_stop_streaming() and must not be handled asynchronously. In that
+ * event we can safely complete the packet work directly in this
+ * context, without resubmitting the URB.
+ */
+ spin_lock_irqsave(&queue->irqlock, flags);
+ if (!(queue->flags & UVC_QUEUE_STOPPING)) {
+ /* Handle any heavy lifting required */
+ INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work);
+ queue_work(stream->async_wq, &uvc_urb->work);
+ } else {
+ uvc_video_copy_packets(uvc_urb);
+ }
+ spin_unlock_irqrestore(&queue->irqlock, flags);
}
/*
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 24e3d8c647e7..4ee465ade494 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -173,9 +173,9 @@
#define DRIVER_VERSION "1.1.1"
/* Number of isochronous URBs. */
-#define UVC_URBS 5
+#define UVC_URBS 50
/* Maximum number of packets per URB. */
-#define UVC_MAX_PACKETS 32
+#define UVC_MAX_PACKETS 48
/* Maximum status buffer size in bytes of interrupt URB. */
#define UVC_MAX_STATUS_SIZE 16
@@ -421,6 +421,7 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
#define UVC_QUEUE_DROP_CORRUPTED (1 << 1)
+#define UVC_QUEUE_STOPPING (1 << 2)
struct uvc_video_queue {
struct vb2_queue queue;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1f261eac4e0b..7fc18bd5ef99 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/version.h>
+#include <linux/v4l2-subdev.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -1186,6 +1187,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break;
case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break;
case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XBGR30: descr = "32-bit XBGR 2-10-10-10"; break;
+ case V4L2_PIX_FMT_XBGR40: descr = "40-bit XBGR 4-12-12-12"; break;
+ case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break;
case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
@@ -1213,6 +1218,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XVUY32: descr = "32-bit packed XVUY 8-8-8-8"; break;
+ case V4L2_PIX_FMT_AVUY32: descr = "32-bit packed AVUY 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUY24: descr = "24-bit packed VUY 8-8-8"; break;
case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
@@ -1225,18 +1233,23 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
case V4L2_PIX_FMT_HM12: descr = "YUV 4:2:0 (16x16 Macroblocks)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
+ case V4L2_PIX_FMT_XVUY10: descr = "XVUY 2-10-10-10"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
case V4L2_PIX_FMT_NV16: descr = "Y/CbCr 4:2:2"; break;
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_XV15: descr = "Y/CrCb 4:2:0 10-bit"; break;
+ case V4L2_PIX_FMT_XV20: descr = "Y/CrCb 4:2:2 10-bit"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV61M: descr = "Y/CrCb 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV12MT: descr = "Y/CbCr 4:2:0 (64x32 MB, N-C)"; break;
case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break;
+ case V4L2_PIX_FMT_XV20M: descr = "Y/CrCb 4:2:2 10-bit (N-C)"; break;
+ case V4L2_PIX_FMT_XV15M: descr = "Y/CrCb 4:2:0 10-bit (N-C)"; break;
case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
@@ -2967,6 +2980,23 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
}
break;
}
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *route = parg;
+
+ if (route->num_routes > 0) {
+ if (route->num_routes > 256)
+ return -EINVAL;
+
+ *user_ptr = (void __user *)route->routes;
+ *kernel_ptr = (void *)&route->routes;
+ *array_size = sizeof(route->routes[0])
+ * route->num_routes;
+ ret = 1;
+ }
+ break;
+ }
}
return ret;
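A minimal userspace sketch of the new routing ioctl (hypothetical pad
numbers; it uses only the fields the kernel side of this patch itself
dereferences, so the exact uapi layout and struct names are assumptions):

    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    static int set_single_route(int subdev_fd)
    {
        struct v4l2_subdev_route r = {
            .sink = 0,      /* hypothetical sink pad */
            .source = 1,    /* hypothetical source pad */
        };
        struct v4l2_subdev_routing routing = {
            .num_routes = 1,
            .routes = &r,
        };

        /* The core copies num_routes route entries from user space. */
        return ioctl(subdev_fd, VIDIOC_SUBDEV_S_ROUTING, &routing);
    }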
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f24978b80440..3189399ef0b0 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -486,6 +486,33 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_DV_TIMINGS:
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+ case VIDIOC_SUBDEV_G_ROUTING:
+ return v4l2_subdev_call(sd, pad, get_routing, arg);
+
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *route = arg;
+ unsigned int i;
+
+ if (route->num_routes > sd->entity.num_pads)
+ return -EINVAL;
+
+ for (i = 0; i < route->num_routes; ++i) {
+ unsigned int sink = route->routes[i].sink;
+ unsigned int source = route->routes[i].source;
+ struct media_pad *pads = sd->entity.pads;
+
+ if (sink >= sd->entity.num_pads ||
+ source >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[sink].flags & MEDIA_PAD_FL_SINK) ||
+ !(pads[source].flags & MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sd, pad, set_routing, route);
+ }
+
case VIDIOC_SUBDEV_G_STD:
return v4l2_subdev_call(sd, video, g_std, arg);
@@ -508,6 +535,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_QUERYSTD:
return v4l2_subdev_call(sd, video, querystd, arg);
#endif
+
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 85fc77148d19..7c7714570780 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -481,6 +481,30 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config XILINX_SDFEC
+ tristate "Xilinx SDFEC 16"
+ help
+ This option enables support for the Xilinx SDFEC (Soft Decision
+ Forward Error Correction) driver, which exposes the SDFEC through
+ a character device.
+
+ You may select this driver if your design instantiates the
+ SDFEC (16nm) hardened block. To compile this driver as a module,
+ choose M here.
+
+ If unsure, say N.
+
+config XILINX_TRAFGEN
+ tristate "Xilinx Traffic Generator"
+ help
+ This option enables support for the Xilinx Traffic Generator driver.
+ It is designed to generate AXI4 traffic, which can be used to stress
+ the different modules/interconnects in the system. Configurable
+ options provided through sysfs entries allow the user to generate a
+ wide variety of traffic based on their requirements.
+
+ If unsure, say N.
+
config MISC_RTSX
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
@@ -493,6 +517,7 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+source "drivers/misc/jesd204b/Kconfig"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b9affcdaa3d6..5ecfda4f080d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,6 +49,9 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
+obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
+obj-$(CONFIG_XILINX_TRAFGEN) += xilinx_trafgen.o
+obj-$(CONFIG_XILINX_JESD204B) += jesd204b/
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
diff --git a/drivers/misc/jesd204b/Kconfig b/drivers/misc/jesd204b/Kconfig
new file mode 100644
index 000000000000..aff08cfe8f82
--- /dev/null
+++ b/drivers/misc/jesd204b/Kconfig
@@ -0,0 +1,28 @@
+#
+# Jesd204b support
+#
+
+config XILINX_JESD204B
+ tristate "Xilinx JESD204B"
+ help
+ This option enables support for the Xilinx JESD204B driver.
+ It is designed to allow the user to access JESD204B IP registers
+ through sysfs entries. JESD204B is the protocol used by high-speed
+ data converters to transfer data to an FPGA/ASIC.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jesd204b.
+
+config XILINX_JESD204B_PHY
+ tristate "JESD Phy Driver"
+ depends on XILINX_JESD204B
+ help
+ This is the JESD204B PHY interface driver. It enables support for
+ the Xilinx JESD204B PHY controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jesd204b_phy.
diff --git a/drivers/misc/jesd204b/Makefile b/drivers/misc/jesd204b/Makefile
new file mode 100644
index 000000000000..7723fcb002c2
--- /dev/null
+++ b/drivers/misc/jesd204b/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XILINX_JESD204B_PHY) += jesd204b_phy.o
+jesd204b_phy-y += jesd_phy.o gtx7s_cpll_bands.o \
+ gtx7s_qpll_bands.o
+obj-$(CONFIG_XILINX_JESD204B) += jesd204b.o
+jesd204b-y += xilinx_jesd204b.o
diff --git a/drivers/misc/jesd204b/gtx7s_cpll_bands.c b/drivers/misc/jesd204b/gtx7s_cpll_bands.c
new file mode 100644
index 000000000000..a9610f7ade67
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_cpll_bands.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include "s7_gtxe2_drp.h"
+#include "gtx7s_cpll_bands.h"
+
+static const u32 gtx7s_cpll_channel_address_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_ADDR,
+ RXCDR_CFG1_ADDR,
+ RXCDR_CFG2_ADDR,
+ RXCDR_CFG3_ADDR,
+ RXCDR_CFG4_ADDR,
+ RXOUT_DIV_ADDR,
+ TXOUT_DIV_ADDR,
+ RX_DFE_LPM_CFG_ADDR
+};
+
+static const u32 gtx7s_cpll_channel_offset_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_OFFSET,
+ RXCDR_CFG1_OFFSET,
+ RXCDR_CFG2_OFFSET,
+ RXCDR_CFG3_OFFSET,
+ RXCDR_CFG4_OFFSET,
+ RXOUT_DIV_OFFSET,
+ TXOUT_DIV_OFFSET,
+ RX_DFE_LPM_CFG_OFFSET
+};
+
+static const u32 gtx7s_cpll_channel_mask_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_MASK,
+ RXCDR_CFG1_MASK,
+ RXCDR_CFG2_MASK,
+ RXCDR_CFG3_MASK,
+ RXCDR_CFG4_MASK,
+ RXOUT_DIV_MASK,
+ TXOUT_DIV_MASK,
+ RX_DFE_LPM_CFG_MASK
+};
+
+/* Note: the columns correspond to line-rate bands 1 to 4 */
+static const u16 gtx7s_cpll_channel_param_lut[GTX7S_CPLL_NUM_CHANNEL_DRP_REGS]
+ [GTX7S_CPLL_NUM_LINE_RATE_BANDS] = {
+ {0x20, 0x20, 0x20, 0x20 },/* RXCDR_CFG0 */
+ {0x1010, 0x1020, 0x1040, 0x1040 },/* RXCDR_CFG1 */
+ {0x23ff, 0x23ff, 0x23ff, 0x23ff },/* RXCDR_CFG2 */
+ {0x0, 0x0, 0x0, 0x0 },/* RXCDR_CFG3 */
+ {0x3, 0x3, 0x3, 0x3 },/* RXCDR_CFG4 */
+ {0x3, 0x2, 0x1, 0x1 },/* RXOUT_DIV */
+ {0x3, 0x2, 0x1, 0x1 },/* TXOUT_DIV */
+ {0x904, 0x904, 0x904, 0x104 } /* RX_DFE_LPM_CFG */
+};
+
+u32 get_gtx7s_cpll_address_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_address_lut[lut_address];
+}
+
+u32 get_gtx7s_cpll_offset_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_offset_lut[lut_address];
+}
+
+u32 get_gtx7s_cpll_mask_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_mask_lut[lut_address];
+}
+
+u16 get_gtx7s_cpll_param_lut(u32 param_address, u32 band_address)
+{
+ return gtx7s_cpll_channel_param_lut[param_address][band_address];
+}
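+
+/*
+ * Usage sketch: the DRP programming code performs a read-modify-write
+ * per register using these LUTs. For RXOUT_DIV (index 5) in the lowest
+ * line-rate band (column 0): mask = 0x7, offset = 0x0, param = 0x3, so
+ * the new value is (old & ~0x7) | ((0x3 << 0) & 0x7).
+ */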
diff --git a/drivers/misc/jesd204b/gtx7s_cpll_bands.h b/drivers/misc/jesd204b/gtx7s_cpll_bands.h
new file mode 100644
index 000000000000..f53f20de2cda
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_cpll_bands.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+
+#ifndef GTX7S_CPLL_BANDS_H_
+#define GTX7S_CPLL_BANDS_H_
+
+#define GTX7S_CPLL_NUM_CHANNEL_DRP_REGS 8
+#define GTX7S_CPLL_NUM_LINE_RATE_BANDS 4
+
+u32 get_gtx7s_cpll_address_lut(u32);
+u32 get_gtx7s_cpll_offset_lut(u32);
+u32 get_gtx7s_cpll_mask_lut(u32);
+u16 get_gtx7s_cpll_param_lut(u32, u32);
+
+#endif /* GTX7S_CPLL_BANDS_H_ */
diff --git a/drivers/misc/jesd204b/gtx7s_qpll_bands.c b/drivers/misc/jesd204b/gtx7s_qpll_bands.c
new file mode 100644
index 000000000000..71e70a611bb7
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_qpll_bands.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include "s7_gtxe2_drp.h"
+#include "gtx7s_qpll_bands.h"
+
+static const u32 gtx7s_qpll_channel_address_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_ADDR,
+ RXCDR_CFG1_ADDR,
+ RXCDR_CFG2_ADDR,
+ RXCDR_CFG3_ADDR,
+ RXCDR_CFG4_ADDR,
+ RXOUT_DIV_ADDR,
+ TXOUT_DIV_ADDR,
+ RX_DFE_LPM_CFG_ADDR,
+ QPLL_CFG0_ADDR,
+ QPLL_CFG1_ADDR
+};
+
+static const u32 gtx7s_qpll_channel_offset_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_OFFSET,
+ RXCDR_CFG1_OFFSET,
+ RXCDR_CFG2_OFFSET,
+ RXCDR_CFG3_OFFSET,
+ RXCDR_CFG4_OFFSET,
+ RXOUT_DIV_OFFSET,
+ TXOUT_DIV_OFFSET,
+ RX_DFE_LPM_CFG_OFFSET,
+ QPLL_CFG0_OFFSET,
+ QPLL_CFG1_OFFSET
+};
+
+static const u32 gtx7s_qpll_channel_mask_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_MASK,
+ RXCDR_CFG1_MASK,
+ RXCDR_CFG2_MASK,
+ RXCDR_CFG3_MASK,
+ RXCDR_CFG4_MASK,
+ RXOUT_DIV_MASK,
+ TXOUT_DIV_MASK,
+ RX_DFE_LPM_CFG_MASK,
+ QPLL_CFG0_MASK,
+ QPLL_CFG1_MASK
+};
+
+/* Note: the columns correspond to line-rate bands 1 to 10 */
+static const u16 gtx7s_qpll_channel_param_lut[GTX7S_QPLL_NUM_CHANNEL_DRP_REGS]
+ [GTX7S_QPLL_NUM_LINE_RATE_BANDS] = {
+{0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20},/* RXCDR_CFG0 */
+{0x1008, 0x1010, 0x1020, 0x1010, 0x1020, 0x1040, 0x1020, 0x1040, 0x1040, 0x1040},/* RXCDR_CFG1 */
+{0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff},/* RXCDR_CFG2 */
+{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* RXCDR_CFG3 */
+{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},/* RXCDR_CFG4 */
+{0x3e8, 0x4, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1},/* RXOUT_DIV */
+{0x3e8, 0x4, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1},/* TXOUT_DIV */
+{0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x104, 0x104},/* RX_DFE_LPM_CFG */
+{0x1c1, 0x1c1, 0x1c1, 0x181, 0x1c1, 0x1c1, 0x181, 0x1c1, 0x1c1, 0x181},/* QPLL_CFG0 */
+{0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68} /* QPLL_CFG1 */
+};
+
+u32 get_gtx7s_qpll_address_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_address_lut[lut_address];
+}
+
+u32 get_gtx7s_qpll_offset_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_offset_lut[lut_address];
+}
+
+u32 get_gtx7s_qpll_mask_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_mask_lut[lut_address];
+}
+
+u16 get_gtx7s_qpll_param_lut(u32 param_address, u32 band_address)
+{
+ return gtx7s_qpll_channel_param_lut[param_address][band_address];
+}
diff --git a/drivers/misc/jesd204b/gtx7s_qpll_bands.h b/drivers/misc/jesd204b/gtx7s_qpll_bands.h
new file mode 100644
index 000000000000..8b9f6c24efb4
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_qpll_bands.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+
+#ifndef GTX7S_QPLL_BANDS_H_
+#define GTX7S_QPLL_BANDS_H_
+
+#define GTX7S_QPLL_NUM_CHANNEL_DRP_REGS 10
+#define GTX7S_QPLL_NUM_LINE_RATE_BANDS 10
+
+u32 get_gtx7s_qpll_address_lut(u32);
+u32 get_gtx7s_qpll_offset_lut(u32);
+u32 get_gtx7s_qpll_mask_lut(u32);
+u16 get_gtx7s_qpll_param_lut(u32, u32);
+
+#endif /* GTX7S_QPLL_BANDS_H_ */
diff --git a/drivers/misc/jesd204b/jesd_phy.c b/drivers/misc/jesd204b/jesd_phy.c
new file mode 100644
index 000000000000..c35d9433d0fc
--- /dev/null
+++ b/drivers/misc/jesd204b/jesd_phy.c
@@ -0,0 +1,384 @@
+/*
+ * Jesd204b phy support
+ *
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "jesd_phy.h"
+#include "gtx7s_cpll_bands.h"
+#include "gtx7s_qpll_bands.h"
+
+#define PLATFORM_JESD204_PHY_ADDR 0x41E10000
+#define JESD_PHY_LOOP_OFF 0
+#define JESD_PHY_LOOP_PCS 1
+#define JESD_PHY_LOOP_PMA 2
+#define JESD_PHY_LOOP_MAX 2
+
+static inline void jesd204b_phy_write(struct jesd204b_phy_state *st,
+ unsigned reg, unsigned val)
+{
+ iowrite32(val, st->phy + reg);
+}
+
+static inline unsigned int jesd204b_phy_read(struct jesd204b_phy_state *st,
+ unsigned reg)
+{
+ return ioread32(st->phy + reg);
+}
+
+#define NUM_GT_CHANNELS 8
+
+#define QPLL 0x3 /* QPLL (7 series) QPLL1 (UltraScale) */
+#define QPLL0 0x2 /* (UltraScale Only) */
+#define CPLL 0x0
+
+#define DRPREAD BIT(30)
+#define DRPWRITE BIT(31)
+
+#define NR_COMMON_DRP_INTERFACES 0x008
+#define NR_TRANS_DRP_INTERFACES 0x00C
+
+#define CHANNEL_DRP_BASE 0x200
+#define CHANNEL_DRP_ADDR 0x204
+#define CHANNEL_DRP_DREAD 0x20C
+#define CHANNEL_DRP_DWRITE 0x208
+#define CHANNEL_DRP_STAT 0x214
+
+#define CHANNEL_XCVR_SEL 0x400
+#define CHANNEL_XCVR_TXPLL 0x40C
+#define CHANNEL_XCVR_RXPLL 0x410
+#define CHANNEL_XCVR_LOOPB 0x41C
+
+static u32 read_channel_drp_reg(struct jesd204b_phy_state *st, u32 addr)
+{
+ u32 temp;
+
+ jesd204b_phy_write(st, CHANNEL_DRP_ADDR, (DRPREAD | addr));
+ temp = jesd204b_phy_read(st, CHANNEL_DRP_DREAD);
+ return temp;
+}
+
+static void write_channel_drp_reg(struct jesd204b_phy_state *st, u32 addr,
+ u32 data)
+{
+ u32 loop = 10;
+
+ jesd204b_phy_write(st, CHANNEL_DRP_DWRITE, data);
+ jesd204b_phy_write(st, CHANNEL_DRP_ADDR, (DRPWRITE | addr));
+
+ /* Poll until the DRP transaction completes, with a bounded timeout. */
+ while (jesd204b_phy_read(st, CHANNEL_DRP_STAT)) {
+ if (!loop--) {
+ dev_err(st->dev, "DRP wait timeout\n");
+ break;
+ }
+ msleep(1);
+ }
+}
+
+static void read_plls(struct jesd204b_phy_state *st)
+{
+ int i;
+ int pll = st->pll;
+ u32 no_of_common_drp_interfaces = 1;
+
+ if (st->pll == CPLL)
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_TRANS_DRP_INTERFACES);
+ else
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_COMMON_DRP_INTERFACES);
+
+ for (i = 0; i < no_of_common_drp_interfaces; i++) {
+ jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+ pll = jesd204b_phy_read(st, CHANNEL_XCVR_TXPLL);
+ pll = jesd204b_phy_read(st, CHANNEL_XCVR_RXPLL);
+ }
+}
+
+static void configure_plls(struct jesd204b_phy_state *st, u32 pll)
+{
+ int i;
+ u32 no_of_common_drp_interfaces;
+
+ if (pll == CPLL)
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_TRANS_DRP_INTERFACES);
+ else
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_COMMON_DRP_INTERFACES);
+
+ for (i = 0; i < no_of_common_drp_interfaces; i++) {
+ jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+ jesd204b_phy_write(st, CHANNEL_XCVR_TXPLL, pll);
+ jesd204b_phy_write(st, CHANNEL_XCVR_RXPLL, pll);
+ }
+}
+
+static void configure_channel_drp(struct jesd204b_phy_state *st, u32 setting)
+{
+ u32 i, j, addr, temp, no_of_common_drp_interfaces;
+ u32 no_channel_drp_reg = GTX7S_QPLL_NUM_CHANNEL_DRP_REGS;
+
+ no_of_common_drp_interfaces = jesd204b_phy_read(
+ st, NR_TRANS_DRP_INTERFACES);
+
+ if (st->pll == CPLL)
+ no_channel_drp_reg = GTX7S_CPLL_NUM_CHANNEL_DRP_REGS;
+ for (i = 0; i < no_of_common_drp_interfaces; i++) {
+ jesd204b_phy_write(st, CHANNEL_DRP_BASE, i);
+ for (j = 0; j < no_channel_drp_reg; j++) {
+ /* Get the register address */
+ if (st->pll == QPLL) {
+ addr = get_gtx7s_qpll_address_lut(j);
+
+ /* Read the register */
+ temp = read_channel_drp_reg(st, addr);
+
+ temp &= (0xFFFF ^ (get_gtx7s_qpll_mask_lut(j)));
+ temp |= ((get_gtx7s_qpll_param_lut(j, setting)
+ << get_gtx7s_qpll_offset_lut(j))
+ & get_gtx7s_qpll_mask_lut(j));
+ } else {
+ addr = get_gtx7s_cpll_address_lut(j);
+
+ temp = read_channel_drp_reg(st, addr);
+
+ temp &= (0xFFFF ^ (get_gtx7s_cpll_mask_lut(j)));
+ temp |= ((get_gtx7s_cpll_param_lut(j, setting)
+ << get_gtx7s_cpll_offset_lut(j))
+ & get_gtx7s_cpll_mask_lut(j));
+ }
+ write_channel_drp_reg(st, addr, temp);
+ }
+ }
+}
+
+void jesd204_phy_set_speed(struct jesd204b_phy_state *st, u32 band)
+{
+ /* Make sure the correct PLLs are selected. */
+ configure_channel_drp(st, band);
+}
+
+static void jesd204_phy_init(struct jesd204b_phy_state *st, int line_rate)
+{
+ jesd204_phy_set_speed(st, line_rate);
+}
+
+int jesd204_phy_set_loop(struct jesd204b_phy_state *st, u32 loopval)
+{
+ int i;
+ u32 no_of_channels;
+
+ no_of_channels = jesd204b_phy_read(st, NR_COMMON_DRP_INTERFACES);
+
+ if (loopval > JESD_PHY_LOOP_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < no_of_channels ; i++) {
+ jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+ jesd204b_phy_write(st, CHANNEL_XCVR_LOOPB, loopval);
+ }
+ return 0;
+}
+
+static ssize_t jesd204b_pll_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+
+ read_plls(st);
+ if (st->pll == CPLL)
+ return sprintf(buf, "cpll\n");
+ return sprintf(buf, "qpll\n");
+}
+
+static ssize_t jesd204b_configure_pll(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+ unsigned val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > QPLL) {
+ dev_err(dev, "Invalid PLL selection %d; valid values:\n"
+ "00 = CPLL\n"
+ "10 = QPLL0 (UltraScale Only)\n"
+ "11 = QPLL (7 series) QPLL1 (UltraScale)\n", val);
+ return -EINVAL;
+ }
+ st->pll = val;
+ configure_plls(st, val);
+
+ return count;
+}
+
+static DEVICE_ATTR(configure_pll, S_IWUSR | S_IRUSR, jesd204b_pll_read,
+ jesd204b_configure_pll);
+
+static ssize_t jesd204b_linerate_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%X\n", st->band);
+}
+
+static ssize_t jesd204b_linerate_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+ int ret;
+ /* Low frequencies are not supported by qpll */
+
+ ret = kstrtouint(buf, 0, &st->band);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Setting the line rate band to %d\n", st->band);
+ /* QPLL reference clock frequency options in the PHY (MHz):
+ * 62.5
+ * 78.125
+ * 94.697
+ * 97.656
+ * 125.000
+ * 156.25
+ * 187.5
+ * 189.394
+ * 195.313
+ * 234.375
+ * 250.000
+ * 284.091
+ * 292.969
+ */
+ if (st->band == 2)
+ clk_set_rate(st->clk, 62500000); /* 2.5G */
+ else if (st->band == 4)
+ clk_set_rate(st->clk, 97656000); /* 3.9G */
+ else if (st->band == 6)
+ clk_set_rate(st->clk, 125000000); /* 5G */
+ else if (st->band == 7)
+ clk_set_rate(st->clk, 156250000); /* 6.25G */
+ else if (st->band == 8)
+ clk_set_rate(st->clk, 195313000); /* 7.812G */
+ else if (st->band == 9)
+ clk_set_rate(st->clk, 250000000);/* 10G */
+
+ jesd204_phy_init(st, st->band);
+
+ return count;
+}
+
+static DEVICE_ATTR(line_rate_band, S_IWUSR | S_IRUSR, jesd204b_linerate_read,
+ jesd204b_linerate_write);
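+
+/*
+ * Usage sketch (hypothetical sysfs path): writing a band index such as
+ *
+ *	echo 6 > /sys/devices/.../line_rate_band
+ *
+ * sets the reference clock to 125 MHz (the 5G band above) and reloads
+ * the channel DRP registers for that band via jesd204_phy_init().
+ */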
+
+/* Match table for of_platform binding */
+static const struct of_device_id jesd204b_phy_of_match[] = {
+ { .compatible = "xlnx,jesd204-phy-2.0", },
+ { /* end of list */ },
+};
+
+static int jesd204b_phy_probe(struct platform_device *pdev)
+{
+ struct jesd204b_phy_state *st;
+ struct resource *mem; /* IO mem resources */
+ int ret;
+ u32 ref_clk;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->clk))
+ return -EPROBE_DEFER;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ st->phy = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(st->phy)) {
+ dev_err(&pdev->dev, "Failed ioremap\n");
+ return PTR_ERR(st->phy);
+ }
+ st->dev = &pdev->dev;
+ platform_set_drvdata(pdev, st);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,lanes",
+ &st->lanes);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read required dt property\n");
+ return ret;
+ }
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,pll-selection",
+ &st->pll);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read required dt property\n");
+ return ret;
+ }
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,gt-refclk-freq",
+ &ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read required dt property\n");
+ return ret;
+ }
+
+ clk_set_rate(st->clk, (unsigned long)ref_clk);
+ device_create_file(&pdev->dev, &dev_attr_configure_pll);
+ device_create_file(&pdev->dev, &dev_attr_line_rate_band);
+
+ ret = clk_prepare_enable(st->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jesd204b_phy_remove(struct platform_device *pdev)
+{
+ struct jesd204b_phy_state *st = platform_get_drvdata(pdev);
+
+ /* The clock is devm-managed, so no explicit clk_put() is needed. */
+ clk_disable_unprepare(st->clk);
+ device_remove_file(&pdev->dev, &dev_attr_configure_pll);
+ device_remove_file(&pdev->dev, &dev_attr_line_rate_band);
+ return 0;
+}
+
+static struct platform_driver jesd204b_driver = {
+ .driver = {
+ .name = "jesd204b_phy",
+ .of_match_table = jesd204b_phy_of_match,
+ },
+ .probe = jesd204b_phy_probe,
+ .remove = jesd204b_phy_remove,
+};
+
+module_platform_driver(jesd204b_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhraj@xilinx.com>");
+MODULE_DESCRIPTION("AXI-JESD204B Phy Interface Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/jesd204b/jesd_phy.h b/drivers/misc/jesd204b/jesd_phy.h
new file mode 100644
index 000000000000..c15328532c3f
--- /dev/null
+++ b/drivers/misc/jesd204b/jesd_phy.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef JESD_PHY_H_
+#define JESD_PHY_H_
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+struct jesd204b_phy_state {
+ struct device *dev;
+ void __iomem *phy;
+ struct clk *clk;
+ u32 vers_id;
+ u32 addr;
+ u32 lanes;
+ u32 band;
+ u32 pll;
+ unsigned long rate;
+};
+
+int jesd204_phy_set_loop(struct jesd204b_phy_state *st, u32 loopval);
+
+#endif /* JESD_PHY_H_ */
diff --git a/drivers/misc/jesd204b/s7_gtxe2_drp.h b/drivers/misc/jesd204b/s7_gtxe2_drp.h
new file mode 100644
index 000000000000..f08a211432bd
--- /dev/null
+++ b/drivers/misc/jesd204b/s7_gtxe2_drp.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define TXOUT_DIV_ADDR 0x88
+#define TXOUT_DIV_MASK 0x70
+#define TXOUT_DIV_OFFSET 0x4
+#define TXOUT_DIV_WIDTH 0x3
+#define TXOUT_DIV_DEFAULT 0x0
+
+#define RXOUT_DIV_ADDR 0x88
+#define RXOUT_DIV_MASK 0x7
+#define RXOUT_DIV_OFFSET 0x0
+#define RXOUT_DIV_WIDTH 0x3
+#define RXOUT_DIV_DEFAULT 0x0
+
+#define RXCDR_CFG0_ADDR 0xa8
+#define RXCDR_CFG0_MASK 0xffff
+#define RXCDR_CFG0_OFFSET 0x0
+#define RXCDR_CFG0_WIDTH 0x10
+#define RXCDR_CFG0_DEFAULT 0x0
+
+#define RXCDR_CFG1_ADDR 0xa9
+#define RXCDR_CFG1_MASK 0xffff
+#define RXCDR_CFG1_OFFSET 0x0
+#define RXCDR_CFG1_WIDTH 0x10
+#define RXCDR_CFG1_DEFAULT 0x0
+
+#define RXCDR_CFG2_ADDR 0xaa
+#define RXCDR_CFG2_MASK 0xffff
+#define RXCDR_CFG2_OFFSET 0x0
+#define RXCDR_CFG2_WIDTH 0x10
+#define RXCDR_CFG2_DEFAULT 0x0
+
+#define RXCDR_CFG3_ADDR 0xab
+#define RXCDR_CFG3_MASK 0xffff
+#define RXCDR_CFG3_OFFSET 0x0
+#define RXCDR_CFG3_WIDTH 0x10
+#define RXCDR_CFG3_DEFAULT 0x0
+
+#define RXCDR_CFG4_ADDR 0xac
+#define RXCDR_CFG4_MASK 0xff
+#define RXCDR_CFG4_OFFSET 0x0
+#define RXCDR_CFG4_WIDTH 0x8
+#define RXCDR_CFG4_DEFAULT 0x0
+
+#define RX_DFE_LPM_CFG_ADDR 0x29
+#define RX_DFE_LPM_CFG_MASK 0xffff
+#define RX_DFE_LPM_CFG_OFFSET 0x0
+#define RX_DFE_LPM_CFG_WIDTH 0x10
+#define RX_DFE_LPM_CFG_DEFAULT 0x0
+
+#define QPLL_CFG0_ADDR 0x32
+#define QPLL_CFG0_MASK 0xffff
+#define QPLL_CFG0_OFFSET 0x0
+#define QPLL_CFG0_WIDTH 0x10
+#define QPLL_CFG0_DEFAULT 0x0
+
+#define QPLL_CFG1_ADDR 0x33
+#define QPLL_CFG1_MASK 0x7ff
+#define QPLL_CFG1_OFFSET 0x0
+#define QPLL_CFG1_WIDTH 0xb
+#define QPLL_CFG1_DEFAULT 0x0
+
+#define QPLL_REFCLK_DIV_M_ADDR 0x33
+#define QPLL_REFCLK_DIV_M_MASK 0xf800
+#define QPLL_REFCLK_DIV_M_OFFSET 0xb
+#define QPLL_REFCLK_DIV_M_WIDTH 0x5
+#define QPLL_REFCLK_DIV_M_DEFAULT 0x0
+
+#define QPLL_FBDIV_N_ADDR 0x36
+#define QPLL_FBDIV_N_MASK 0x3ff
+#define QPLL_FBDIV_N_OFFSET 0x0
+#define QPLL_FBDIV_N_WIDTH 0xa
+#define QPLL_FBDIV_N_DEFAULT 0x0
+
+#define QPLL_FBDIV_RATIO_ADDR 0x37
+#define QPLL_FBDIV_RATIO_MASK 0x40
+#define QPLL_FBDIV_RATIO_OFFSET 0x6
+#define QPLL_FBDIV_RATIO_WIDTH 0x1
+#define QPLL_FBDIV_RATIO_DEFAULT 0x0
+
+#define CPLL_CFG0_ADDR 0x5c
+#define CPLL_CFG0_MASK 0xff00
+#define CPLL_CFG0_OFFSET 0x8
+#define CPLL_CFG0_WIDTH 0x8
+#define CPLL_CFG0_DEFAULT 0x0
+
+#define CPLL_CFG1_ADDR 0x5d
+#define CPLL_CFG1_MASK 0xffff
+#define CPLL_CFG1_OFFSET 0x0
+#define CPLL_CFG1_WIDTH 0x10
+#define CPLL_CFG1_DEFAULT 0x0
+
+#define CPLL_REFCLK_DIV_M_ADDR 0x5e
+#define CPLL_REFCLK_DIV_M_MASK 0x1f00
+#define CPLL_REFCLK_DIV_M_OFFSET 0x8
+#define CPLL_REFCLK_DIV_M_WIDTH 0x5
+#define CPLL_REFCLK_DIV_M_DEFAULT 0x0
+
+#define CPLL_FB_DIV_45_N1_ADDR 0x5e
+#define CPLL_FB_DIV_45_N1_MASK 0x80
+#define CPLL_FB_DIV_45_N1_OFFSET 0x7
+#define CPLL_FB_DIV_45_N1_WIDTH 0x1
+#define CPLL_FB_DIV_45_N1_DEFAULT 0x0
+
+#define CPLL_FBDIV_N2_ADDR 0x5e
+#define CPLL_FBDIV_N2_MASK 0x7f
+#define CPLL_FBDIV_N2_OFFSET 0x0
+#define CPLL_FBDIV_N2_WIDTH 0x7
+#define CPLL_FBDIV_N2_DEFAULT 0x0
diff --git a/drivers/misc/jesd204b/xilinx_jesd204b.c b/drivers/misc/jesd204b/xilinx_jesd204b.c
new file mode 100644
index 000000000000..304557c8978e
--- /dev/null
+++ b/drivers/misc/jesd204b/xilinx_jesd204b.c
@@ -0,0 +1,399 @@
+/*
+ * Xilinx AXI-JESD204B Interface Module
+ *
+ * Copyright 2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * http://wiki.analog.com/resources/fpga/xilinx/
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "xilinx_jesd204b.h"
+
+struct child_clk {
+ struct clk_hw hw;
+ struct jesd204b_state *st;
+ unsigned long rate;
+ bool enabled;
+};
+
+#define to_clk_priv(_hw) container_of(_hw, struct child_clk, hw)
+
+static inline void jesd204b_write(struct jesd204b_state *st,
+ unsigned int reg, unsigned int val)
+{
+ iowrite32(val, st->regs + reg);
+}
+
+static inline unsigned int jesd204b_read(struct jesd204b_state *st,
+ unsigned int reg)
+{
+ return ioread32(st->regs + reg);
+}
+
+static ssize_t jesd204b_laneinfo_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, unsigned int lane)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+ int ret;
+ unsigned int val1, val2, val3;
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_ID_L(lane));
+ val2 = jesd204b_read(st, XLNX_JESD204_REG_LANE_F(lane));
+ val3 = jesd204b_read(st, XLNX_JESD204_REG_SCR_S_HD_CF(lane));
+ ret = sprintf(buf,
+ "DID: %d, BID: %d, LID: %d, L: %d, SCR: %d, F: %d\n",
+ XLNX_JESD204_LANE_DID(val1),
+ XLNX_JESD204_LANE_BID(val1),
+ XLNX_JESD204_LANE_LID(val1),
+ XLNX_JESD204_LANE_L(val1),
+ XLNX_JESD204_LANE_SCR(val3),
+ XLNX_JESD204_LANE_F(val2));
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_LANE_K(lane));
+ val2 = jesd204b_read(st, XLNX_JESD204_REG_M_N_ND_CS(lane));
+
+ ret += sprintf(buf + ret,
+ "K: %d, M: %d, N: %d, CS: %d, S: %d, N': %d, HD: %d\n",
+ XLNX_JESD204_LANE_K(val1),
+ XLNX_JESD204_LANE_M(val2),
+ XLNX_JESD204_LANE_N(val2),
+ XLNX_JESD204_LANE_CS(val2),
+ XLNX_JESD204_LANE_S(val3),
+ XLNX_JESD204_LANE_ND(val2),
+ XLNX_JESD204_LANE_HD(val3));
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_FCHK(lane));
+ ret += sprintf(buf + ret, "FCHK: 0x%X, CF: %d\n",
+ XLNX_JESD204_LANE_FCHK(val1),
+ XLNX_JESD204_LANE_CF(val3));
+
+ val1 = jesd204b_read(st, XLNX_JESD204_REG_SC2_ADJ_CTRL(lane));
+ val2 = jesd204b_read(st, XLNX_JESD204_REG_LANE_VERSION(lane));
+ ret += sprintf(buf + ret,
+ "ADJCNT: %d, PHYADJ: %d, ADJDIR: %d, JESDV: %d, SUBCLASS: %d\n",
+ XLNX_JESD204_LANE_ADJ_CNT(val1),
+ XLNX_JESD204_LANE_PHASE_ADJ_REQ(val1),
+ XLNX_JESD204_LANE_ADJ_CNT_DIR(val1),
+ XLNX_JESD204_LANE_JESDV(val2),
+ XLNX_JESD204_LANE_SUBCLASS(val2));
+
+ ret += sprintf(buf + ret, "MFCNT : 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_MFC_CNT(lane)));
+ ret += sprintf(buf + ret, "ILACNT: 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_ILA_CNT(lane)));
+ ret += sprintf(buf + ret, "ERRCNT: 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_ERR_CNT(lane)));
+ ret += sprintf(buf + ret, "BUFCNT: 0x%X\n",
+ jesd204b_read(st, XLNX_JESD204_REG_TM_BUF_ADJ(lane)));
+ ret += sprintf(buf + ret, "LECNT: 0x%X\n",
+ jesd204b_read(st,
+ XLNX_JESD204_REG_TM_LINK_ERR_CNT(lane)));
+
+ ret += sprintf(buf + ret, "FC: %lu\n", st->rate);
+
+ return ret;
+}
+
+#define JESD_LANE(_x) \
+static ssize_t jesd204b_lane##_x##_info_read(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return jesd204b_laneinfo_read(dev, attr, buf, _x); \
+} \
+static DEVICE_ATTR(lane##_x##_info, 0400, jesd204b_lane##_x##_info_read, \
+ NULL)
+
+JESD_LANE(0);
+JESD_LANE(1);
+JESD_LANE(2);
+JESD_LANE(3);
+JESD_LANE(4);
+JESD_LANE(5);
+JESD_LANE(6);
+JESD_LANE(7);
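+
+/*
+ * Each lane above gets a read-only "laneN_info" sysfs attribute; e.g.
+ * (the path is hypothetical and depends on how the platform device is
+ * named):
+ *
+ *   cat /sys/devices/platform/<jesd204b-node>/lane0_info
+ *
+ * dumps the ILA configuration and error counters captured for lane 0.
+ */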
+
+static ssize_t jesd204b_lane_syscstat_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, unsigned int lane)
+{
+ unsigned int stat;
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+
+ stat = jesd204b_read(st, XLNX_JESD204_REG_SYNC_ERR_STAT);
+
+ return sprintf(buf,
+ "NOT_IN_TAB: %d, DISPARITY: %d, UNEXPECTED_K: %d\n",
+ stat & XLNX_JESD204_SYNC_ERR_NOT_IN_TAB(lane),
+ stat & XLNX_JESD204_SYNC_ERR_DISPARITY(lane),
+ stat & XLNX_JESD204_SYNC_ERR_UNEXPECTED_K(lane));
+}
+
+#define JESD_SYNCSTAT_LANE(_x) \
+static ssize_t jesd204b_lane##_x##_syncstat_read(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ return jesd204b_lane_syscstat_read(dev, attr, buf, _x); \
+} \
+static DEVICE_ATTR(lane##_x##_syncstat, 0400, \
+ jesd204b_lane##_x##_syncstat_read, NULL)
+
+JESD_SYNCSTAT_LANE(0);
+JESD_SYNCSTAT_LANE(1);
+JESD_SYNCSTAT_LANE(2);
+JESD_SYNCSTAT_LANE(3);
+JESD_SYNCSTAT_LANE(4);
+JESD_SYNCSTAT_LANE(5);
+JESD_SYNCSTAT_LANE(6);
+JESD_SYNCSTAT_LANE(7);
+
+static ssize_t jesd204b_reg_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = sscanf(buf, "%i %i", &st->addr, &val);
+ if (ret == 2)
+ jesd204b_write(st, st->addr, val);
+
+ return count;
+}
+
+static ssize_t jesd204b_reg_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%X\n", jesd204b_read(st, st->addr));
+}
+
+static DEVICE_ATTR(reg_access, 0600, jesd204b_reg_read,
+ jesd204b_reg_write);
+
+static ssize_t jesd204b_syncreg_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct jesd204b_state *st = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%X\n", jesd204b_read(st,
+ XLNX_JESD204_REG_SYNC_STATUS));
+}
+
+static DEVICE_ATTR(sync_status, 0400, jesd204b_syncreg_read, NULL);
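+
+/*
+ * Sketch of (hypothetical) shell usage for the two attributes above:
+ *
+ *   echo "0x38" > reg_access      # latch the register offset to inspect
+ *   cat reg_access                # read that register back
+ *   echo "0x38 0x1" > reg_access  # write 0x1 to register 0x38
+ *   cat sync_status               # raw XLNX_JESD204_REG_SYNC_STATUS value
+ */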
+
+static unsigned long jesd204b_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate;
+}
+
+static int jesd204b_clk_enable(struct clk_hw *hw)
+{
+ to_clk_priv(hw)->enabled = true;
+
+ return 0;
+}
+
+static void jesd204b_clk_disable(struct clk_hw *hw)
+{
+ to_clk_priv(hw)->enabled = false;
+}
+
+static int jesd204b_clk_is_enabled(struct clk_hw *hw)
+{
+ return to_clk_priv(hw)->enabled;
+}
+
+static const struct clk_ops clkout_ops = {
+ .recalc_rate = jesd204b_clk_recalc_rate,
+ .enable = jesd204b_clk_enable,
+ .disable = jesd204b_clk_disable,
+ .is_enabled = jesd204b_clk_is_enabled,
+};
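+
+/*
+ * The exported child clock is a pass-through: enable/disable only latch a
+ * flag, and recalc_rate() mirrors the parent, so consumers observe the AXI
+ * core clock rate unchanged.
+ */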
+
+/* Match table for of_platform binding */
+static const struct of_device_id jesd204b_of_match[] = {
+ { .compatible = "xlnx,jesd204-5.1",},
+ { .compatible = "xlnx,jesd204-5.2",},
+ { .compatible = "xlnx,jesd204-6.1",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, jesd204b_of_match);
+
+static int jesd204b_probe(struct platform_device *pdev)
+{
+ struct jesd204b_state *st;
+ struct resource *mem; /* IO mem resources */
+ struct clk *clk;
+ struct child_clk *clk_priv;
+ struct clk_init_data init;
+ unsigned int val;
+ int ret;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return -EPROBE_DEFER;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ st->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(st->regs)) {
+ dev_err(&pdev->dev, "Failed ioremap\n");
+ return PTR_ERR(st->regs);
+ }
+
+ st->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, st);
+
+ st->clk = clk;
+ clk_set_rate(st->clk, 156250000);
+ st->rate = clk_get_rate(clk);
+
+ of_property_read_u32(pdev->dev.of_node, "xlnx,node-is-transmit",
+ &st->transmit);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,lanes",
+ &st->lanes);
+ if (ret)
+ st->lanes = jesd204b_read(st, XLNX_JESD204_REG_LANES) + 1;
+
+ jesd204b_write(st, XLNX_JESD204_REG_RESET, XLNX_JESD204_RESET);
+ while (!jesd204b_read(st, XLNX_JESD204_REG_RESET))
+ msleep(20);
+
+ jesd204b_write(st, XLNX_JESD204_REG_ILA_CTRL,
+ (of_property_read_bool(pdev->dev.of_node,
+ "xlnx,lanesync-enable") ? XLNX_JESD204_ILA_EN : 0));
+
+ jesd204b_write(st, XLNX_JESD204_REG_SCR_CTRL,
+ (of_property_read_bool(pdev->dev.of_node,
+ "xlnx,scramble-enable") ? XLNX_JESD204_SCR_EN : 0));
+
+ jesd204b_write(st, XLNX_JESD204_REG_SYSREF_CTRL,
+ (of_property_read_bool(pdev->dev.of_node,
+ "xlnx,sysref-always-enable") ?
+ XLNX_JESD204_ALWAYS_SYSREF_EN : 0));
+
+ device_create_file(&pdev->dev, &dev_attr_reg_access);
+
+ device_create_file(&pdev->dev, &dev_attr_sync_status);
+ switch (st->lanes) {
+ case 8:
+ device_create_file(&pdev->dev, &dev_attr_lane4_info);
+ device_create_file(&pdev->dev, &dev_attr_lane5_info);
+ device_create_file(&pdev->dev, &dev_attr_lane6_info);
+ device_create_file(&pdev->dev, &dev_attr_lane7_info);
+ if (!st->transmit) {
+ device_create_file(&pdev->dev,
+ &dev_attr_lane4_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane5_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane6_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane7_syncstat);
+ }
+ /* fall through */
+ case 4:
+ device_create_file(&pdev->dev, &dev_attr_lane2_info);
+ device_create_file(&pdev->dev, &dev_attr_lane3_info);
+ if (!st->transmit) {
+ device_create_file(&pdev->dev,
+ &dev_attr_lane2_syncstat);
+ device_create_file(&pdev->dev,
+ &dev_attr_lane3_syncstat);
+ }
+ /* fall through */
+ case 2:
+ device_create_file(&pdev->dev, &dev_attr_lane1_info);
+ if (!st->transmit)
+ device_create_file(&pdev->dev,
+ &dev_attr_lane1_syncstat);
+ /* fall through */
+ case 1:
+ device_create_file(&pdev->dev, &dev_attr_lane0_info);
+ if (!st->transmit)
+ device_create_file(&pdev->dev,
+ &dev_attr_lane0_syncstat);
+ break;
+ default:
+ break;
+ }
+
+ clk_priv = devm_kzalloc(&pdev->dev, sizeof(*clk_priv), GFP_KERNEL);
+ if (!clk_priv)
+ return -ENOMEM;
+
+ /* struct child_clk assignments */
+ clk_priv->hw.init = &init;
+ clk_priv->rate = st->rate;
+ clk_priv->st = st;
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ val = jesd204b_read(st, XLNX_JESD204_REG_VERSION);
+
+ dev_info(&pdev->dev,
+ "AXI-JESD204B %d.%d Rev %d, at 0x%08llX mapped to 0x%p",
+ XLNX_JESD204_VERSION_MAJOR(val),
+ XLNX_JESD204_VERSION_MINOR(val),
+ XLNX_JESD204_VERSION_REV(val),
+ (unsigned long long)mem->start, st->regs);
+
+ return 0;
+}
+
+static int jesd204b_remove(struct platform_device *pdev)
+{
+ struct jesd204b_state *st = platform_get_drvdata(pdev);
+
+ /* The clock came from devm_clk_get(), so no explicit clk_put() here */
+ clk_disable_unprepare(st->clk);
+
+ return 0;
+}
+
+static struct platform_driver jesd204b_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = jesd204b_of_match,
+ },
+ .probe = jesd204b_probe,
+ .remove = jesd204b_remove,
+};
+
+module_platform_driver(jesd204b_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AXI-JESD204B Interface Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/jesd204b/xilinx_jesd204b.h b/drivers/misc/jesd204b/xilinx_jesd204b.h
new file mode 100644
index 000000000000..b9946a723a23
--- /dev/null
+++ b/drivers/misc/jesd204b/xilinx_jesd204b.h
@@ -0,0 +1,135 @@
+/*
+ * Xilinx AXI-JESD204B v5.1 Interface Module
+ *
+ * Copyright 2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * http://wiki.analog.com/resources/fpga/xilinx/
+ */
+
+#ifndef XILINX_JESD204B_H_
+#define XILINX_JESD204B_H_
+
+struct jesd204b_state {
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *phy;
+ struct clk *clk;
+ u32 lanes;
+ u32 vers_id;
+ u32 addr;
+ u32 band;
+ u32 transmit;
+ u32 pll;
+ unsigned long rate;
+};
+
+#define XLNX_JESD204_REG_VERSION 0x000
+#define XLNX_JESD204_VERSION_MAJOR(x) (((x) >> 24) & 0xFF)
+#define XLNX_JESD204_VERSION_MINOR(x) (((x) >> 16) & 0xFF)
+#define XLNX_JESD204_VERSION_REV(x) (((x) >> 8) & 0xFF)
+
+#define XLNX_JESD204_REG_RESET 0x004
+#define XLNX_JESD204_RESET (1 << 0)
+
+#define XLNX_JESD204_REG_ILA_CTRL 0x008
+#define XLNX_JESD204_ILA_EN (1 << 0)
+
+#define XLNX_JESD204_REG_SCR_CTRL 0x00C
+#define XLNX_JESD204_SCR_EN (1 << 0)
+
+#define XLNX_JESD204_REG_SYSREF_CTRL 0x010
+#define XLNX_JESD204_ALWAYS_SYSREF_EN (1 << 0)
+
+#define XLNX_JESD204_REG_ILA_MFC 0x014
+#define XLNX_JESD204_ILA_MFC(x) (((x) - 1) & 0xFF)
+ /* TX only 4..256 */
+
+#define XLNX_JESD204_REG_TEST_MODE_SEL 0x018
+#define XLNX_JESD204_TEST_MODE_OFF 0 /* Normal operation */
+#define XLNX_JESD204_TEST_MODE_K28_5 1 /* Send/Receive /K28.5/
+ * indefinitely
+ */
+#define XLNX_JESD204_TEST_MODE_ILA 2 /* Synchronize as normal then
+ * send/receive repeated ILA
+ * sequences
+ */
+#define XLNX_JESD204_TEST_MODE_D21_5 3 /* Send/Receive /D21.5/
+ * indefinitely
+ */
+#define XLNX_JESD204_TEST_MODE_RPAT 5 /* Send/Receive modified
+ * random pattern (RPAT)
+ */
+#define XLNX_JESD204_TEST_MODE_JSPAT 7 /* Send/Receive a scrambled
+ * jitter pattern (JSPAT)
+ */
+
+#define XLNX_JESD204_REG_SYNC_STATUS 0x038 /* Link SYNC status */
+#define XLNX_JESD204_REG_SYNC_ERR_STAT 0x01C /* RX only */
+#define XLNX_JESD204_SYNC_ERR_NOT_IN_TAB(lane) (1 << (0 + (lane) * 3))
+#define XLNX_JESD204_SYNC_ERR_DISPARITY(lane) (1 << (1 + (lane) * 3))
+#define XLNX_JESD204_SYNC_ERR_UNEXPECTED_K(lane) (1 << (2 + (lane) * 3))
+
+#define XLNX_JESD204_REG_OCTETS_PER_FRAME 0x020
+#define XLNX_JESD204_OCTETS_PER_FRAME(x) (((x) - 1) & 0xFF) /* 1..256 */
+
+#define XLNX_JESD204_REG_FRAMES_PER_MFRAME 0x024
+#define XLNX_JESD204_FRAMES_PER_MFRAME(x) (((x) - 1) & 0x1F) /* 1..32 */
+
+#define XLNX_JESD204_REG_LANES 0x028
+#define XLNX_JESD204_LANES(x) (((x) - 1) & 0x1F) /* 1..32 */
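+/* e.g. XLNX_JESD204_LANES(8) encodes as 0x7; reads decode as raw value + 1 */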
+
+#define XLNX_JESD204_REG_SUBCLASS 0x02C
+
+#define XLNX_JESD204_REG_RX_BUF_DELAY 0x030 /* RX only */
+#define XLNX_JESD204_RX_BUF_DELAY(x) ((x) & 0x1FFF)
+
+#define XLNX_JESD204_REG_RX_LINK_CTRL 0x034 /* RX only */
+#define XLNX_JESD204_LINK_TEST_EN (1 << 0)
+#define XLNX_JESD204_SYNC_ERR_REP_DIS (1 << 8)
+
+/* Per LANE Registers */
+#define XLNX_JESD204_REG_LANE_VERSION(l) (0x800 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_SUBCLASS(x) (((x) >> 0) & 0x7)
+#define XLNX_JESD204_LANE_JESDV(x) (((x) >> 8) & 0x7)
+
+#define XLNX_JESD204_REG_LANE_F(l) (0x804 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_F(x) ((((x) >> 0) & 0xFF) + 1)
+
+#define XLNX_JESD204_REG_LANE_K(l) (0x808 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_K(x) ((((x) >> 0) & 0x1F) + 1)
+
+#define XLNX_JESD204_REG_ID_L(l) (0x80C + ((l) * 0x40))
+#define XLNX_JESD204_LANE_DID(x) (((x) >> 0) & 0xFF)
+#define XLNX_JESD204_LANE_BID(x) (((x) >> 8) & 0x1F)
+#define XLNX_JESD204_LANE_LID(x) (((x) >> 16) & 0x1F)
+#define XLNX_JESD204_LANE_L(x) ((((x) >> 24) & 0x1F) + 1)
+
+#define XLNX_JESD204_REG_M_N_ND_CS(l) (0x810 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_M(x) ((((x) >> 0) & 0xFF) + 1)
+#define XLNX_JESD204_LANE_N(x) ((((x) >> 8) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_ND(x) ((((x) >> 16) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_CS(x) (((x) >> 24) & 0x3)
+
+#define XLNX_JESD204_REG_SCR_S_HD_CF(l) (0x814 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_SCR(x) (((x) >> 0) & 0x1)
+#define XLNX_JESD204_LANE_S(x) ((((x) >> 8) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_HD(x) (((x) >> 16) & 0x1)
+#define XLNX_JESD204_LANE_CF(x) (((x) >> 24) & 0x1F)
+
+#define XLNX_JESD204_REG_FCHK(l) (0x818 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_FCHK(x) (((x) >> 16) & 0xFF)
+
+#define XLNX_JESD204_REG_SC2_ADJ_CTRL(l) (0x81C + ((l) * 0x40))
+#define XLNX_JESD204_LANE_ADJ_CNT(x) (((x) >> 0) & 0xF)
+#define XLNX_JESD204_LANE_PHASE_ADJ_REQ(x) (((x) >> 8) & 0x1)
+#define XLNX_JESD204_LANE_ADJ_CNT_DIR(x) (((x) >> 16) & 0x1)
+
+#define XLNX_JESD204_REG_TM_ERR_CNT(l) (0x820 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_LINK_ERR_CNT(l) (0x824 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_ILA_CNT(l) (0x828 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_MFC_CNT(l) (0x82C + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_BUF_ADJ(l) (0x830 + ((l) * 0x40))
+
+#endif /* XILINX_JESD204B_H_ */
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
new file mode 100644
index 000000000000..ea341e96893e
--- /dev/null
+++ b/drivers/misc/xilinx_sdfec.c
@@ -0,0 +1,1683 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx SDFEC
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for the SDFEC16 (Soft Decision FEC 16nm)
+ * IP. It exposes a character device interface and supports file
+ * operations such as open(), close(), poll() and ioctl().
+ */
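+
+/*
+ * Minimal user-space sketch (the /dev node name is hypothetical and depends
+ * on how udev names the character device):
+ *
+ *   int fd = open("/dev/xsdfec0", O_RDWR);
+ *   struct xsdfec_status status;
+ *
+ *   if (fd >= 0 && !ioctl(fd, XSDFEC_GET_STATUS, &status))
+ *       printf("state %d activity %d\n", status.state, status.activity);
+ *   close(fd);
+ */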
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+
+#include <uapi/misc/xilinx_sdfec.h>
+
+#define DRIVER_NAME "xilinx_sdfec"
+#define DRIVER_VERSION "0.3"
+#define DRIVER_MAX_DEV BIT(MINORBITS)
+
+static struct class *xsdfec_class;
+static atomic_t xsdfec_ndevs = ATOMIC_INIT(0);
+static dev_t xsdfec_devt;
+
+/* Xilinx SDFEC Register Map */
+/* CODE_WRI_PROTECT Register */
+#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
+
+/* ACTIVE Register */
+#define XSDFEC_ACTIVE_ADDR (0x8)
+#define XSDFEC_IS_ACTIVITY_SET (0x1)
+
+/* AXIS_WIDTH Register */
+#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
+#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
+#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
+#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
+#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
+
+/* AXIS_ENABLE Register */
+#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
+#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
+#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
+#define XSDFEC_AXIS_ENABLE_MASK \
+ (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
+
+/* FEC_CODE Register */
+#define XSDFEC_FEC_CODE_ADDR (0x14)
+
+/* ORDER Register Map */
+#define XSDFEC_ORDER_ADDR (0x18)
+
+/* Interrupt Status Register */
+#define XSDFEC_ISR_ADDR (0x1C)
+/* Interrupt Status Register Bit Mask */
+#define XSDFEC_ISR_MASK (0x3F)
+
+/* Write Only - Interrupt Enable Register */
+#define XSDFEC_IER_ADDR (0x20)
+/* Write Only - Interrupt Disable Register */
+#define XSDFEC_IDR_ADDR (0x24)
+/* Read Only - Interrupt Mask Register */
+#define XSDFEC_IMR_ADDR (0x28)
+
+/* ECC Interrupt Status Register */
+#define XSDFEC_ECC_ISR_ADDR (0x2C)
+/* Single Bit Errors */
+#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
+/* PL Initialize Single Bit Errors */
+#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
+/* Multi Bit Errors */
+#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
+/* PL Initialize Multi Bit Errors */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
+/* Multi Bit Error to Event Shift */
+#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
+/* PL Initialize Multi Bit Error to Event Shift */
+#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
+/* ECC Interrupt Status Bit Mask */
+#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status PL Initialize Bit Mask */
+#define XSDFEC_PL_INIT_ECC_ISR_MASK \
+ (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+/* ECC Interrupt Status All Bit Mask */
+#define XSDFEC_ALL_ECC_ISR_MASK \
+ (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
+/* ECC Interrupt Status Single Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
+ (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
+/* ECC Interrupt Status Multi Bit Errors Mask */
+#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
+ (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
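+/* Together the SBE and MBE masks above cover ECC ISR bits [29:0] */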
+
+/* Write Only - ECC Interrupt Enable Register */
+#define XSDFEC_ECC_IER_ADDR (0x30)
+/* Write Only - ECC Interrupt Disable Register */
+#define XSDFEC_ECC_IDR_ADDR (0x34)
+/* Read Only - ECC Interrupt Mask Register */
+#define XSDFEC_ECC_IMR_ADDR (0x38)
+
+/* BYPASS Register */
+#define XSDFEC_BYPASS_ADDR (0x3C)
+
+/* Turbo Code Register */
+#define XSDFEC_TURBO_ADDR (0x100)
+#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
+#define XSDFEC_TURBO_SCALE_BIT_POS (8)
+#define XSDFEC_TURBO_SCALE_MAX (15)
+
+/* REG0 Register */
+#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
+#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
+#define XSDFEC_REG0_N_MIN (4)
+#define XSDFEC_REG0_N_MAX (32768)
+#define XSDFEC_REG0_N_MUL_P (256)
+#define XSDFEC_REG0_N_LSB (0)
+#define XSDFEC_REG0_K_MIN (2)
+#define XSDFEC_REG0_K_MAX (32766)
+#define XSDFEC_REG0_K_MUL_P (256)
+#define XSDFEC_REG0_K_LSB (16)
+
+/* REG1 Register */
+#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
+#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
+#define XSDFEC_REG1_PSIZE_MIN (2)
+#define XSDFEC_REG1_PSIZE_MAX (512)
+#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
+#define XSDFEC_REG1_NO_PACKING_LSB (10)
+#define XSDFEC_REG1_NM_MASK (0xFF800)
+#define XSDFEC_REG1_NM_LSB (11)
+#define XSDFEC_REG1_BYPASS_MASK (0x100000)
+
+/* REG2 Register */
+#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
+#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
+#define XSDFEC_REG2_NLAYERS_MIN (1)
+#define XSDFEC_REG2_NLAYERS_MAX (256)
+#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
+#define XSDFEC_REG2_NMQC_LSB (9)
+#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
+#define XSDFEC_REG2_NORM_TYPE_LSB (20)
+#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
+#define XSDFEC_REG2_SPECIAL_QC_LSB (21)
+#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
+#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
+#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
+#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
+
+/* REG3 Register */
+#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
+#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
+#define XSDFEC_REG3_LA_OFF_LSB (8)
+#define XSDFEC_REG3_QC_OFF_LSB (16)
+
+#define XSDFEC_LDPC_REG_JUMP (0x10)
+#define XSDFEC_REG_WIDTH_JUMP (4)
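+
+/*
+ * Each LDPC code occupies one REG0..REG3 group spaced XSDFEC_LDPC_REG_JUMP
+ * apart, so code_id N places REG0 at 0x2000 + N * 0x10 (e.g. code_id 2 ->
+ * 0x2020), up to the *_ADDR_HIGH bound checked by the reg write helpers.
+ */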
+
+#define XSDFEC_SC_TABLE_DEPTH (0x3FC)
+#define XSDFEC_LA_TABLE_DEPTH (0xFFC)
+#define XSDFEC_QC_TABLE_DEPTH (0x7FFC)
+
+/**
+ * struct xsdfec_clks - For managing SD-FEC clocks
+ * @core_clk: Main processing clock for core
+ * @axi_clk: AXI4-Lite memory-mapped clock
+ * @din_words_clk: DIN Words AXI4-Stream Slave clock
+ * @din_clk: DIN AXI4-Stream Slave clock
+ * @dout_clk: DOUT AXI4-Stream Slave clock
+ * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
+ * @ctrl_clk: Control AXI4-Stream Slave clock
+ * @status_clk: Status AXI4-Stream Slave clock
+ */
+struct xsdfec_clks {
+ struct clk *core_clk;
+ struct clk *axi_clk;
+ struct clk *din_words_clk;
+ struct clk *din_clk;
+ struct clk *dout_clk;
+ struct clk *dout_words_clk;
+ struct clk *ctrl_clk;
+ struct clk *status_clk;
+};
+
+/**
+ * struct xsdfec_dev - Driver data for SDFEC
+ * @regs: device physical base address
+ * @dev: pointer to device struct
+ * @state: State of the SDFEC device
+ * @config: Configuration of the SDFEC device
+ * @intr_enabled: indicates IRQ enabled
+ * @state_updated: indicates State updated by interrupt handler
+ * @stats_updated: indicates Stats updated by interrupt handler
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ * @open_count: Count of char device being opened
+ * @irq: IRQ number
+ * @xsdfec_cdev: Character device handle
+ * @waitq: Driver wait queue
+ * @irq_lock: Driver spinlock
+ * @clks: Clocks managed by the SDFEC driver
+ *
+ * This structure contains necessary state for SDFEC driver to operate
+ */
+struct xsdfec_dev {
+ void __iomem *regs;
+ struct device *dev;
+ enum xsdfec_state state;
+ struct xsdfec_config config;
+ bool intr_enabled;
+ bool state_updated;
+ bool stats_updated;
+ atomic_t isr_err_count;
+ atomic_t cecc_count;
+ atomic_t uecc_count;
+ atomic_t open_count;
+ int irq;
+ struct cdev xsdfec_cdev;
+ wait_queue_head_t waitq;
+ /* Spinlock to protect state_updated and stats_updated */
+ spinlock_t irq_lock;
+ struct xsdfec_clks clks;
+};
+
+static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
+ u32 value)
+{
+ dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
+ iowrite32(value, xsdfec->regs + addr);
+}
+
+static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
+{
+ u32 rval;
+
+ rval = ioread32(xsdfec->regs + addr);
+ dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
+ return rval;
+}
+
+static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
+ u32 reg_offset, u32 bit_num,
+ bool *config_value)
+{
+ u32 reg_val;
+ u32 bit_mask = 1 << bit_num;
+
+ reg_val = xsdfec_regread(xsdfec, reg_offset);
+ *config_value = (reg_val & bit_mask) > 0;
+}
+
+static void update_config_from_hw(struct xsdfec_dev *xsdfec)
+{
+ u32 reg_value;
+ bool sdfec_started;
+
+ /* Update the Order */
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
+ xsdfec->config.order = reg_value;
+
+ update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
+ 0, /* Bit Number, maybe change to mask */
+ &xsdfec->config.bypass);
+
+ update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
+ 0, /* Bit Number */
+ &xsdfec->config.code_wr_protect);
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ xsdfec->config.irq.enable_ecc_isr =
+ (reg_value & XSDFEC_ECC_ISR_MASK) > 0;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+ sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
+ if (sdfec_started)
+ xsdfec->state = XSDFEC_STARTED;
+ else
+ xsdfec->state = XSDFEC_STOPPED;
+}
+
+static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
+{
+ struct xsdfec_dev *xsdfec;
+
+ xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
+ if (!xsdfec)
+ return -EAGAIN;
+
+ /* Only one open per device at a time */
+ if (!atomic_dec_and_test(&xsdfec->open_count)) {
+ atomic_inc(&xsdfec->open_count);
+ return -EBUSY;
+ }
+
+ fptr->private_data = xsdfec;
+ return 0;
+}
+
+static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
+{
+ struct xsdfec_dev *xsdfec;
+
+ xsdfec = container_of(iptr->i_cdev, struct xsdfec_dev, xsdfec_cdev);
+ if (!xsdfec)
+ return -EAGAIN;
+
+ atomic_inc(&xsdfec->open_count);
+ return 0;
+}
+
+static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_status status;
+ int err;
+
+ status.fec_id = xsdfec->config.fec_id;
+ spin_lock_irq(&xsdfec->irq_lock);
+ status.state = xsdfec->state;
+ xsdfec->state_updated = false;
+ spin_unlock_irq(&xsdfec->irq_lock);
+ status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
+ XSDFEC_IS_ACTIVITY_SET);
+
+ err = copy_to_user(arg, &status, sizeof(status));
+ if (err) {
+ dev_err(xsdfec->dev, "%s failed for SDFEC%d", __func__,
+ xsdfec->config.fec_id);
+ err = -EFAULT;
+ }
+ return err;
+}
+
+static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ int err;
+
+ err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
+ if (err) {
+ dev_err(xsdfec->dev, "%s failed for SDFEC%d", __func__,
+ xsdfec->config.fec_id);
+ err = -EFAULT;
+ }
+ return err;
+}
+
+static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if (mask_read & XSDFEC_ISR_MASK) {
+ dev_err(xsdfec->dev,
+ "SDFEC enabling irq with IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
+ if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
+ dev_err(xsdfec->dev,
+ "SDFEC disabling irq with IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
+{
+ u32 mask_read;
+
+ if (enable) {
+ /* Enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
+ dev_err(xsdfec->dev,
+ "SDFEC enabling ECC irq with ECC IER failed");
+ return -EIO;
+ }
+ } else {
+ /* Disable */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
+ XSDFEC_ALL_ECC_ISR_MASK);
+ mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
+ if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_ECC_ISR_MASK) ||
+ ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
+ XSDFEC_PL_INIT_ECC_ISR_MASK))) {
+ dev_err(xsdfec->dev,
+ "SDFEC disable ECC irq with ECC IDR failed");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_irq irq;
+ int err;
+ int isr_err;
+ int ecc_err;
+
+ err = copy_from_user(&irq, arg, sizeof(irq));
+ if (err) {
+ dev_err(xsdfec->dev, "%s failed for SDFEC%d", __func__,
+ xsdfec->config.fec_id);
+ return -EFAULT;
+ }
+
+ /* Setup tlast related IRQ */
+ isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
+ if (!isr_err)
+ xsdfec->config.irq.enable_isr = irq.enable_isr;
+
+ /* Setup ECC related IRQ */
+ ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
+ if (!ecc_err)
+ xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
+
+ if (isr_err < 0 || ecc_err < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_turbo turbo;
+ int err;
+ u32 turbo_write;
+
+ err = copy_from_user(&turbo, arg, sizeof(turbo));
+ if (err) {
+ dev_err(xsdfec->dev, "%s failed for SDFEC%d", __func__,
+ xsdfec->config.fec_id);
+ return -EFAULT;
+ }
+
+ if (turbo.alg >= XSDFEC_TURBO_ALG_MAX) {
+ dev_err(xsdfec->dev,
+ "%s invalid turbo alg value %d for SDFEC%d", __func__,
+ turbo.alg, xsdfec->config.fec_id);
+ return -EINVAL;
+ }
+
+ if (turbo.scale > XSDFEC_TURBO_SCALE_MAX) {
+ dev_err(xsdfec->dev,
+ "%s invalid turbo scale value %d for SDFEC%d", __func__,
+ turbo.scale, xsdfec->config.fec_id);
+ return -EINVAL;
+ }
+
+ /* Check to see what device tree says about the FEC codes */
+ if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
+ dev_err(xsdfec->dev,
+ "%s: Unable to write Turbo to SDFEC%d check DT",
+ __func__, xsdfec->config.fec_id);
+ return -EIO;
+ }
+
+ turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
+ << XSDFEC_TURBO_SCALE_BIT_POS) |
+ turbo.alg;
+ xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
+ return err;
+}
+
+static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ u32 reg_value;
+ struct xsdfec_turbo turbo_params;
+ int err;
+
+ if (xsdfec->config.code == XSDFEC_LDPC_CODE) {
+ dev_err(xsdfec->dev,
+ "%s: SDFEC%d is configured for LDPC, check DT",
+ __func__, xsdfec->config.fec_id);
+ return -EIO;
+ }
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
+
+ turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
+ XSDFEC_TURBO_SCALE_BIT_POS;
+ turbo_params.alg = reg_value & 0x1;
+
+ err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
+ if (err) {
+ dev_err(xsdfec->dev, "%s failed for SDFEC%d", __func__,
+ xsdfec->config.fec_id);
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
+ u32 offset)
+{
+ u32 wdata;
+
+ if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX ||
+ (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
+ dev_err(xsdfec->dev, "N value is not in range");
+ return -EINVAL;
+ }
+ n <<= XSDFEC_REG0_N_LSB;
+
+ if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
+ (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
+ dev_err(xsdfec->dev, "K value is not in range");
+ return -EINVAL;
+ }
+ k = k << XSDFEC_REG0_K_LSB;
+ wdata = k | n;
+
+ if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
+ dev_err(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
+ XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
+ u32 no_packing, u32 nm, u32 offset)
+{
+ u32 wdata;
+
+ if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
+ dev_err(xsdfec->dev, "Psize is not in range");
+ return -EINVAL;
+ }
+
+ if (no_packing != 0 && no_packing != 1)
+ dev_err(xsdfec->dev, "No-packing bit register invalid");
+ no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
+ XSDFEC_REG1_NO_PACKING_MASK);
+
+ if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
+ dev_err(xsdfec->dev, "NM is beyond 10 bits");
+ nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
+
+ wdata = nm | no_packing | psize;
+ if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
+ dev_err(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
+ XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
+ u32 norm_type, u32 special_qc, u32 no_final_parity,
+ u32 max_schedule, u32 offset)
+{
+ u32 wdata;
+
+ if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
+ nlayers > XSDFEC_REG2_NLAYERS_MAX) {
+ dev_err(xsdfec->dev, "Nlayers is not in range");
+ return -EINVAL;
+ }
+
+ if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
+ dev_err(xsdfec->dev, "NMQC exceeds 11 bits");
+ nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
+
+ if (norm_type > 1)
+ dev_err(xsdfec->dev, "Norm type is invalid");
+ norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
+ XSDFEC_REG2_NORM_TYPE_MASK);
+ if (special_qc > 1)
+ dev_err(xsdfec->dev, "Special QC is invalid");
+ special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
+ XSDFEC_REG2_SPECIAL_QC_MASK);
+
+ if (no_final_parity > 1)
+ dev_err(xsdfec->dev, "No final parity check invalid");
+ no_final_parity =
+ ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
+ XSDFEC_REG2_NO_FINAL_PARITY_MASK);
+ if (max_schedule &
+ ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
+ dev_err(xsdfec->dev, "Max Schdule exceeds 2 bits");
+ max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
+ XSDFEC_REG2_MAX_SCHEDULE_MASK);
+
+ wdata = (max_schedule | no_final_parity | special_qc | norm_type |
+ nmqc | nlayers);
+
+ if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
+ dev_err(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
+ XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
+ u16 qc_off, u32 offset)
+{
+ u32 wdata;
+
+ wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
+ (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
+ if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
+ XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
+ dev_err(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
+ XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP));
+ return -EINVAL;
+ }
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
+ (offset * XSDFEC_LDPC_REG_JUMP),
+ wdata);
+ return 0;
+}
+
+static int xsdfec_sc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
+ u32 *sc_ptr, u32 len)
+{
+ u32 reg;
+
+ /*
+ * Writes that go beyond the length of the
+ * Shared Scale (SC) table should fail
+ */
+ if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > XSDFEC_SC_TABLE_DEPTH) {
+ dev_err(xsdfec->dev, "Write exceeds SC table length");
+ return -EINVAL;
+ }
+
+ for (reg = 0; reg < len; reg++) {
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_SC_TABLE_ADDR_BASE +
+ (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
+ sc_ptr[reg]);
+ }
+ return reg;
+}
+
+static int xsdfec_la_table_write(struct xsdfec_dev *xsdfec, u32 offset,
+ u32 *la_ptr, u32 len)
+{
+ u32 reg;
+
+ if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_LA_TABLE_DEPTH) {
+ dev_err(xsdfec->dev, "Write exceeds LA table length");
+ return -EINVAL;
+ }
+
+ for (reg = 0; reg < len; reg++) {
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_LA_TABLE_ADDR_BASE +
+ (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
+ la_ptr[reg]);
+ }
+ return reg;
+}
+
+static int xsdfec_qc_table_write(struct xsdfec_dev *xsdfec, u32 offset,
+ u32 *qc_ptr, u32 len)
+{
+ u32 reg;
+
+ if (XSDFEC_REG_WIDTH_JUMP * (offset + len) > XSDFEC_QC_TABLE_DEPTH) {
+ dev_err(xsdfec->dev, "Write exceeds QC table length");
+ return -EINVAL;
+ }
+
+ for (reg = 0; reg < len; reg++) {
+ xsdfec_regwrite(xsdfec,
+ XSDFEC_LDPC_QC_TABLE_ADDR_BASE +
+ (offset + reg) * XSDFEC_REG_WIDTH_JUMP,
+ qc_ptr[reg]);
+ }
+
+ return reg;
+}
+
+static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ struct xsdfec_ldpc_params *ldpc;
+ int ret;
+
+ ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
+ if (!ldpc)
+ return -ENOMEM;
+
+ ret = copy_from_user(ldpc, arg, sizeof(*ldpc));
+ if (ret) {
+ dev_err(xsdfec->dev, "%s failed to copy from user for SDFEC%d",
+ __func__, xsdfec->config.fec_id);
+ goto err_out;
+ }
+ if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
+ dev_err(xsdfec->dev,
+ "%s: Unable to write LDPC to SDFEC%d check DT",
+ __func__, xsdfec->config.fec_id);
+ ret = -EIO;
+ goto err_out;
+ }
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED) {
+ dev_err(xsdfec->dev,
+ "%s attempting to write LDPC code while started for SDFEC%d",
+ __func__, xsdfec->config.fec_id);
+ ret = -EIO;
+ goto err_out;
+ }
+
+ if (xsdfec->config.code_wr_protect) {
+ dev_err(xsdfec->dev,
+ "%s writing LDPC code while Code Write Protection enabled for SDFEC%d",
+ __func__, xsdfec->config.fec_id);
+ ret = -EIO;
+ goto err_out;
+ }
+
+ /* Write Reg 0 */
+ ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 1 */
+ ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 2 */
+ ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
+ ldpc->norm_type, ldpc->special_qc,
+ ldpc->no_final_parity, ldpc->max_schedule,
+ ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Reg 3 */
+ ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
+ ldpc->qc_off, ldpc->code_id);
+ if (ret)
+ goto err_out;
+
+ /* Write Shared Codes */
+ ret = xsdfec_sc_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table,
+ ldpc->nlayers);
+ if (ret < 0)
+ goto err_out;
+
+ ret = xsdfec_la_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
+ ldpc->nlayers);
+ if (ret < 0)
+ goto err_out;
+
+ ret = xsdfec_qc_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
+ ldpc->nqc);
+ if (ret > 0)
+ ret = 0;
+err_out:
+ kfree(ldpc);
+ return ret;
+}
+
+static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ bool order_invalid;
+ enum xsdfec_order order;
+
+ /*
+ * Fetch the requested order from user space; dereferencing a __user
+ * pointer directly is unsafe.
+ */
+ if (get_user(order, (enum xsdfec_order __user *)arg))
+ return -EFAULT;
+
+ order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
+ (order != XSDFEC_OUT_OF_ORDER);
+ if (order_invalid) {
+ dev_err(xsdfec->dev, "%s invalid order value %d for SDFEC%d",
+ __func__, order, xsdfec->config.fec_id);
+ return -EINVAL;
+ }
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED) {
+ dev_err(xsdfec->dev,
+ "%s attempting to set Order while started for SDFEC%d",
+ __func__, xsdfec->config.fec_id);
+ return -EIO;
+ }
+
+ xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
+
+ xsdfec->config.order = order;
+
+ return 0;
+}
+
+static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+ bool bypass;
+
+ /*
+ * Fetch the bypass flag from user space rather than dereferencing
+ * the __user pointer directly.
+ */
+ if (get_user(bypass, arg))
+ return -EFAULT;
+
+ /* Verify Device has not started */
+ if (xsdfec->state == XSDFEC_STARTED) {
+ dev_err(xsdfec->dev,
+ "%s attempting to set bypass while started for SDFEC%d",
+ __func__, xsdfec->config.fec_id);
+ return -EIO;
+ }
+
+ if (bypass)
+ xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
+ else
+ xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
+
+ xsdfec->config.bypass = bypass;
+
+ return 0;
+}
+
+static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *is_active)
+{
+ u32 reg_value;
+
+ reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
+ /* Normalize to a boolean with double negation rather than a cast */
+ *is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
+
+ return 0;
+}
+
+static u32
+xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
+{
+ u32 axis_width_field = 0;
+
+ switch (axis_width_cfg) {
+ case XSDFEC_1x128b:
+ axis_width_field = 0;
+ break;
+ case XSDFEC_2x128b:
+ axis_width_field = 1;
+ break;
+ case XSDFEC_4x128b:
+ axis_width_field = 2;
+ break;
+ }
+
+ return axis_width_field;
+}
+
+static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
+ axis_word_inc_cfg)
+{
+ u32 axis_words_field = 0;
+
+ if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
+ axis_word_inc_cfg == XSDFEC_IN_BLOCK)
+ axis_words_field = 0;
+ else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
+ axis_words_field = 1;
+
+ return axis_words_field;
+}
+
+static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
+{
+ u32 reg_value;
+ u32 dout_words_field;
+ u32 dout_width_field;
+ u32 din_words_field;
+ u32 din_width_field;
+ struct xsdfec_config *config = &xsdfec->config;
+
+ /* translate config info to register values */
+ dout_words_field =
+ xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
+ dout_width_field =
+ xsdfec_translate_axis_width_cfg_val(config->dout_width);
+ din_words_field =
+ xsdfec_translate_axis_words_cfg_val(config->din_word_include);
+ din_width_field =
+ xsdfec_translate_axis_width_cfg_val(config->din_width);
+
+ reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
+ reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
+ reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
+ reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
+
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
+
+ return 0;
+}
+
+static int xsdfec_start(struct xsdfec_dev *xsdfec)
+{
+ u32 regread;
+
+ regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
+ regread &= 0x1;
+ if (regread != xsdfec->config.code) {
+ dev_err(xsdfec->dev,
+ "%s SDFEC HW code does not match driver code, reg %d, code %d",
+ __func__, regread, xsdfec->config.code);
+ return -EINVAL;
+ }
+
+ /* Set AXIS enable */
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
+ XSDFEC_AXIS_ENABLE_MASK);
+ /* Done */
+ xsdfec->state = XSDFEC_STARTED;
+ return 0;
+}
+
+static int xsdfec_stop(struct xsdfec_dev *xsdfec)
+{
+ u32 regread;
+
+ if (xsdfec->state != XSDFEC_STARTED)
+ dev_err(xsdfec->dev, "Device not started correctly");
+ /* Disable AXIS_ENABLE Input interfaces only */
+ regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+ regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
+ xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
+ /* Stop */
+ xsdfec->state = XSDFEC_STOPPED;
+ return 0;
+}
+
+static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
+{
+ atomic_set(&xsdfec->isr_err_count, 0);
+ atomic_set(&xsdfec->uecc_count, 0);
+ atomic_set(&xsdfec->cecc_count, 0);
+
+ return 0;
+}
+
+static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+ int err;
+ struct xsdfec_stats user_stats;
+
+ spin_lock_irq(&xsdfec->irq_lock);
+ user_stats.isr_err_count = atomic_read(&xsdfec->isr_err_count);
+ user_stats.cecc_count = atomic_read(&xsdfec->cecc_count);
+ user_stats.uecc_count = atomic_read(&xsdfec->uecc_count);
+ xsdfec->stats_updated = false;
+ spin_unlock_irq(&xsdfec->irq_lock);
+
+ err = copy_to_user(arg, &user_stats, sizeof(user_stats));
+ if (err) {
+ dev_err(xsdfec->dev, "%s failed for SDFEC%d", __func__,
+ xsdfec->config.fec_id);
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
+{
+ /* Ensure registers are aligned with core configuration */
+ xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+ xsdfec_cfg_axi_streams(xsdfec);
+ update_config_from_hw(xsdfec);
+
+ return 0;
+}
+
+static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ struct xsdfec_dev *xsdfec = fptr->private_data;
+ void __user *arg = NULL;
+ int rval = -EINVAL;
+ int err = 0;
+
+ if (!xsdfec)
+ return rval;
+
+ /* In failed state allow only reset and get status IOCTLs */
+ if (xsdfec->state == XSDFEC_NEEDS_RESET &&
+ (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
+ cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
+ dev_err(xsdfec->dev, "SDFEC%d in failed state. Reset Required",
+ xsdfec->config.fec_id);
+ return -EPERM;
+ }
+
+ if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) {
+ dev_err(xsdfec->dev, "Not a xilinx sdfec ioctl");
+ return -ENOTTY;
+ }
+
+ /* check if ioctl argument is present and valid */
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
+ arg = (void __user *)data;
+ if (!arg) {
+ dev_err(xsdfec->dev,
+ "xilinx sdfec ioctl argument is NULL Pointer");
+ return rval;
+ }
+ }
+
+ /* Access check of the argument if present */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok((void *)arg, _IOC_SIZE(cmd));
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok((void *)arg, _IOC_SIZE(cmd));
+
+ if (err) {
+ dev_err(xsdfec->dev, "Invalid xilinx sdfec ioctl argument");
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case XSDFEC_START_DEV:
+ rval = xsdfec_start(xsdfec);
+ break;
+ case XSDFEC_STOP_DEV:
+ rval = xsdfec_stop(xsdfec);
+ break;
+ case XSDFEC_CLEAR_STATS:
+ rval = xsdfec_clear_stats(xsdfec);
+ break;
+ case XSDFEC_GET_STATS:
+ rval = xsdfec_get_stats(xsdfec, arg);
+ break;
+ case XSDFEC_GET_STATUS:
+ rval = xsdfec_get_status(xsdfec, arg);
+ break;
+ case XSDFEC_GET_CONFIG:
+ rval = xsdfec_get_config(xsdfec, arg);
+ break;
+ case XSDFEC_SET_DEFAULT_CONFIG:
+ rval = xsdfec_set_default_config(xsdfec);
+ break;
+ case XSDFEC_SET_IRQ:
+ rval = xsdfec_set_irq(xsdfec, arg);
+ break;
+ case XSDFEC_SET_TURBO:
+ rval = xsdfec_set_turbo(xsdfec, arg);
+ break;
+ case XSDFEC_GET_TURBO:
+ rval = xsdfec_get_turbo(xsdfec, arg);
+ break;
+ case XSDFEC_ADD_LDPC_CODE_PARAMS:
+ rval = xsdfec_add_ldpc(xsdfec, arg);
+ break;
+ case XSDFEC_SET_ORDER:
+ rval = xsdfec_set_order(xsdfec, arg);
+ break;
+ case XSDFEC_SET_BYPASS:
+ rval = xsdfec_set_bypass(xsdfec, arg);
+ break;
+ case XSDFEC_IS_ACTIVE:
+ rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
+ break;
+ default:
+ /* Should not get here */
+ dev_err(xsdfec->dev, "Undefined SDFEC IOCTL");
+ break;
+ }
+ return rval;
+}
+
+static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct xsdfec_dev *xsdfec = file->private_data;
+
+ if (!xsdfec)
+ return POLLNVAL | POLLHUP;
+
+ poll_wait(file, &xsdfec->waitq, wait);
+
+ /* XSDFEC ISR detected an error */
+ spin_lock_irq(&xsdfec->irq_lock);
+ if (xsdfec->state_updated)
+ mask |= POLLIN | POLLPRI;
+
+ if (xsdfec->stats_updated)
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irq(&xsdfec->irq_lock);
+
+ return mask;
+}
+
+static const struct file_operations xsdfec_fops = {
+ .owner = THIS_MODULE,
+ .open = xsdfec_dev_open,
+ .release = xsdfec_dev_release,
+ .unlocked_ioctl = xsdfec_dev_ioctl,
+ .poll = xsdfec_poll,
+};
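+
+/*
+ * A user-space monitor would typically block in poll() and then re-read,
+ * e.g. (sketch): POLLIN | POLLRDNORM signals fresh stats, fetched with
+ * ioctl(fd, XSDFEC_GET_STATS, &stats); POLLIN | POLLPRI signals a state
+ * change, fetched with ioctl(fd, XSDFEC_GET_STATUS, &status).
+ */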
+
+static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
+{
+ struct device *dev = xsdfec->dev;
+ struct device_node *node = dev->of_node;
+ int rval;
+ const char *fec_code;
+ u32 din_width;
+ u32 din_word_include;
+ u32 dout_width;
+ u32 dout_word_include;
+
+ rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,sdfec-code not in DT");
+ return rval;
+ }
+
+ if (!strcasecmp(fec_code, "ldpc")) {
+ xsdfec->config.code = XSDFEC_LDPC_CODE;
+ } else if (!strcasecmp(fec_code, "turbo")) {
+ xsdfec->config.code = XSDFEC_TURBO_CODE;
+ } else {
+ dev_err(xsdfec->dev, "Invalid Code in DT");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
+ &din_word_include);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,sdfec-din-words not in DT");
+ return rval;
+ }
+
+ if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) {
+ xsdfec->config.din_word_include = din_word_include;
+ } else {
+ dev_err(xsdfec->dev, "Invalid DIN Words in DT");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,sdfec-din-width not in DT");
+ return rval;
+ }
+
+ switch (din_width) {
+ /* Fall through and set for valid values */
+ case XSDFEC_1x128b:
+ case XSDFEC_2x128b:
+ case XSDFEC_4x128b:
+ xsdfec->config.din_width = din_width;
+ break;
+ default:
+ dev_err(xsdfec->dev, "Invalid DIN Width in DT");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
+ &dout_word_include);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,sdfec-dout-words not in DT");
+ return rval;
+ }
+
+ if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) {
+ xsdfec->config.dout_word_include = dout_word_include;
+ } else {
+ dev_err(xsdfec->dev, "Invalid DOUT Words in DT");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,sdfec-dout-width not in DT");
+ return rval;
+ }
+
+ switch (dout_width) {
+ /* Fall through and set for valid values */
+ case XSDFEC_1x128b:
+ case XSDFEC_2x128b:
+ case XSDFEC_4x128b:
+ xsdfec->config.dout_width = dout_width;
+ break;
+ default:
+ dev_err(xsdfec->dev, "Invalid DOUT Width in DT");
+ return -EINVAL;
+ }
+
+ /* Write LDPC to CODE Register */
+ xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+
+ xsdfec_cfg_axi_streams(xsdfec);
+
+ return 0;
+}
+
+static void xsdfec_count_and_clear_ecc_multi_errors(struct xsdfec_dev *xsdfec,
+ u32 uecc)
+{
+ u32 uecc_event;
+
+ /* Update ECC ISR error counts */
+ atomic_add(hweight32(uecc), &xsdfec->uecc_count);
+ xsdfec->stats_updated = true;
+
+ /* Clear ECC errors */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR,
+ XSDFEC_ALL_ECC_ISR_MBE_MASK);
+ /* Clear ECC events */
+ if (uecc & XSDFEC_ECC_ISR_MBE_MASK) {
+ uecc_event = uecc >> XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT;
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, uecc_event);
+ } else if (uecc & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) {
+ uecc_event = uecc >> XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT;
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, uecc_event);
+ }
+}
+
+static void xsdfec_count_and_clear_ecc_single_errors(struct xsdfec_dev *xsdfec,
+ u32 cecc, u32 sbe_mask)
+{
+ /* Update ECC ISR error counts */
+ atomic_add(hweight32(cecc), &xsdfec->cecc_count);
+ xsdfec->stats_updated = true;
+
+ /* Clear ECC errors */
+ xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, sbe_mask);
+}
+
+static void xsdfec_count_and_clear_isr_errors(struct xsdfec_dev *xsdfec,
+ u32 isr_err)
+{
+ /* Update ISR error counts */
+ atomic_add(hweight32(isr_err), &xsdfec->isr_err_count);
+ xsdfec->stats_updated = true;
+
+ /* Clear ISR error status */
+ xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, XSDFEC_ISR_MASK);
+}
+
+static void xsdfec_update_state_for_isr_err(struct xsdfec_dev *xsdfec)
+{
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ xsdfec->state_updated = true;
+}
+
+static void xsdfec_update_state_for_ecc_err(struct xsdfec_dev *xsdfec,
+ u32 ecc_err)
+{
+ if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_NEEDS_RESET;
+ else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+ xsdfec->state = XSDFEC_PL_RECONFIGURE;
+
+ xsdfec->state_updated = true;
+}
+
+static int xsdfec_get_sbe_mask(u32 ecc_err)
+{
+ u32 sbe_mask = XSDFEC_ALL_ECC_ISR_SBE_MASK;
+
+ if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK) {
+ sbe_mask = (XSDFEC_ECC_ISR_MBE_MASK - ecc_err) >>
+ XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT;
+ } else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+ sbe_mask = (XSDFEC_PL_INIT_ECC_ISR_MBE_MASK - ecc_err) >>
+ XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT;
+
+ return sbe_mask;
+}
+
+static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
+{
+ struct xsdfec_dev *xsdfec = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 ecc_err;
+ u32 isr_err;
+ u32 err_value;
+ u32 sbe_mask;
+
+ WARN_ON(xsdfec->irq != irq);
+
+ /* Mask Interrupts */
+ xsdfec_isr_enable(xsdfec, false);
+ xsdfec_ecc_isr_enable(xsdfec, false);
+
+ /* Read Interrupt Status Registers */
+ ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
+ isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
+
+ spin_lock(&xsdfec->irq_lock);
+
+ err_value = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
+ if (err_value) {
+ dev_err(xsdfec->dev, "Multi-bit error on xsdfec%d",
+ xsdfec->config.fec_id);
+ /* Count and clear multi-bit errors and associated events */
+ xsdfec_count_and_clear_ecc_multi_errors(xsdfec, err_value);
+ xsdfec_update_state_for_ecc_err(xsdfec, ecc_err);
+ }
+
+ /*
+ * Update the SBE mask to remove events associated with any MBE, if
+ * present. If no MBEs are present, the mask covers all SBE bits.
+ */
+ sbe_mask = xsdfec_get_sbe_mask(err_value);
+ err_value = ecc_err & sbe_mask;
+ if (err_value) {
+ dev_info(xsdfec->dev, "Correctable error on xsdfec%d",
+ xsdfec->config.fec_id);
+ xsdfec_count_and_clear_ecc_single_errors(xsdfec, err_value,
+ sbe_mask);
+ }
+
+ err_value = isr_err & XSDFEC_ISR_MASK;
+ if (err_value) {
+ dev_err(xsdfec->dev,
+ "Tlast,or DIN_WORDS or DOUT_WORDS not correct");
+ xsdfec_count_and_clear_isr_errors(xsdfec, err_value);
+ xsdfec_update_state_for_isr_err(xsdfec);
+ }
+
+ if (xsdfec->state_updated || xsdfec->stats_updated)
+ wake_up_interruptible(&xsdfec->waitq);
+ else
+ ret = IRQ_NONE;
+
+ /* Unmask Interrupts */
+ xsdfec_isr_enable(xsdfec, true);
+ xsdfec_ecc_isr_enable(xsdfec, true);
+
+ spin_unlock(&xsdfec->irq_lock);
+
+ return ret;
+}
+
+static int xsdfec_clk_init(struct platform_device *pdev,
+ struct xsdfec_clks *clks)
+{
+ int err;
+
+ clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(clks->core_clk)) {
+ dev_err(&pdev->dev, "failed to get core_clk");
+ return PTR_ERR(clks->core_clk);
+ }
+
+ clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(clks->axi_clk)) {
+ dev_err(&pdev->dev, "failed to get axi_clk");
+ return PTR_ERR(clks->axi_clk);
+ }
+
+ clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
+ if (IS_ERR(clks->din_words_clk))
+ clks->din_words_clk = NULL;
+
+ clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
+ if (IS_ERR(clks->din_clk))
+ clks->din_clk = NULL;
+
+ clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
+ if (IS_ERR(clks->dout_clk))
+ clks->dout_clk = NULL;
+
+ clks->dout_words_clk =
+ devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
+ if (IS_ERR(clks->dout_words_clk))
+ clks->dout_words_clk = NULL;
+
+ clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
+ if (IS_ERR(clks->ctrl_clk))
+ clks->ctrl_clk = NULL;
+
+ clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
+ if (IS_ERR(clks->status_clk))
+ clks->status_clk = NULL;
+
+ err = clk_prepare_enable(clks->core_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(clks->axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
+ goto err_disable_core_clk;
+ }
+
+ err = clk_prepare_enable(clks->din_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
+ goto err_disable_axi_clk;
+ }
+
+ err = clk_prepare_enable(clks->din_words_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
+ goto err_disable_din_clk;
+ }
+
+ err = clk_prepare_enable(clks->dout_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
+ goto err_disable_din_words_clk;
+ }
+
+ err = clk_prepare_enable(clks->dout_words_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
+ err);
+ goto err_disable_dout_clk;
+ }
+
+ err = clk_prepare_enable(clks->ctrl_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
+ goto err_disable_dout_words_clk;
+ }
+
+ err = clk_prepare_enable(clks->status_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
+ goto err_disable_ctrl_clk;
+ }
+
+ return err;
+
+err_disable_ctrl_clk:
+ clk_disable_unprepare(clks->ctrl_clk);
+err_disable_dout_words_clk:
+ clk_disable_unprepare(clks->dout_words_clk);
+err_disable_dout_clk:
+ clk_disable_unprepare(clks->dout_clk);
+err_disable_din_words_clk:
+ clk_disable_unprepare(clks->din_words_clk);
+err_disable_din_clk:
+ clk_disable_unprepare(clks->din_clk);
+err_disable_axi_clk:
+ clk_disable_unprepare(clks->axi_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(clks->core_clk);
+
+ return err;
+}
+
+static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
+{
+ clk_disable_unprepare(clks->status_clk);
+ clk_disable_unprepare(clks->ctrl_clk);
+ clk_disable_unprepare(clks->dout_words_clk);
+ clk_disable_unprepare(clks->dout_clk);
+ clk_disable_unprepare(clks->din_words_clk);
+ clk_disable_unprepare(clks->din_clk);
+ clk_disable_unprepare(clks->core_clk);
+ clk_disable_unprepare(clks->axi_clk);
+}
+
+static int xsdfec_probe(struct platform_device *pdev)
+{
+ struct xsdfec_dev *xsdfec;
+ struct device *dev;
+ struct device *dev_create;
+ struct resource *res;
+ int err;
+ bool irq_enabled = true;
+
+ xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
+ if (!xsdfec)
+ return -ENOMEM;
+
+ xsdfec->dev = &pdev->dev;
+ xsdfec->config.fec_id = atomic_read(&xsdfec_ndevs);
+ spin_lock_init(&xsdfec->irq_lock);
+
+ err = xsdfec_clk_init(pdev, &xsdfec->clks);
+ if (err)
+ return err;
+
+ dev = xsdfec->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xsdfec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(xsdfec->regs)) {
+ dev_err(dev, "Unable to map resource");
+ err = PTR_ERR(xsdfec->regs);
+ goto err_xsdfec_dev;
+ }
+
+ xsdfec->irq = platform_get_irq(pdev, 0);
+ if (xsdfec->irq < 0) {
+ dev_dbg(dev, "platform_get_irq failed");
+ irq_enabled = false;
+ }
+
+ err = xsdfec_parse_of(xsdfec);
+ if (err < 0)
+ goto err_xsdfec_dev;
+
+ update_config_from_hw(xsdfec);
+
+ /* Save driver private data */
+ platform_set_drvdata(pdev, xsdfec);
+
+ if (irq_enabled) {
+ init_waitqueue_head(&xsdfec->waitq);
+ /* Register IRQ thread */
+ err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
+ xsdfec_irq_thread, IRQF_ONESHOT,
+ "xilinx-sdfec16", xsdfec);
+ if (err < 0) {
+ dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
+ goto err_xsdfec_dev;
+ }
+ }
+
+ cdev_init(&xsdfec->xsdfec_cdev, &xsdfec_fops);
+ xsdfec->xsdfec_cdev.owner = THIS_MODULE;
+ err = cdev_add(&xsdfec->xsdfec_cdev,
+ MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id), 1);
+ if (err < 0) {
+ dev_err(dev, "cdev_add failed");
+ err = -EIO;
+ goto err_xsdfec_dev;
+ }
+
+ if (!xsdfec_class) {
+ err = -EIO;
+ dev_err(dev, "xsdfec class not created correctly");
+ goto err_xsdfec_cdev;
+ }
+
+ dev_create =
+ device_create(xsdfec_class, dev,
+ MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id),
+ xsdfec, "xsdfec%d", xsdfec->config.fec_id);
+ if (IS_ERR(dev_create)) {
+ dev_err(dev, "unable to create device");
+ err = PTR_ERR(dev_create);
+ goto err_xsdfec_cdev;
+ }
+
+ atomic_set(&xsdfec->open_count, 1);
+ dev_info(dev, "XSDFEC%d Probe Successful", xsdfec->config.fec_id);
+ atomic_inc(&xsdfec_ndevs);
+ return 0;
+
+ /* Failure cleanup */
+err_xsdfec_cdev:
+ cdev_del(&xsdfec->xsdfec_cdev);
+err_xsdfec_dev:
+ xsdfec_disable_all_clks(&xsdfec->clks);
+ return err;
+}
+
+static int xsdfec_remove(struct platform_device *pdev)
+{
+ struct xsdfec_dev *xsdfec;
+ struct device *dev = &pdev->dev;
+
+ xsdfec = platform_get_drvdata(pdev);
+ if (!xsdfec)
+ return -ENODEV;
+
+ if (!xsdfec_class) {
+ dev_err(dev, "xsdfec_class is NULL");
+ return -EIO;
+ }
+
+ xsdfec_disable_all_clks(&xsdfec->clks);
+
+ device_destroy(xsdfec_class,
+ MKDEV(MAJOR(xsdfec_devt), xsdfec->config.fec_id));
+ cdev_del(&xsdfec->xsdfec_cdev);
+ atomic_dec(&xsdfec_ndevs);
+ return 0;
+}
+
+static const struct of_device_id xsdfec_of_match[] = {
+ {
+ .compatible = "xlnx,sd-fec-1.1",
+ },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xsdfec_of_match);
+
+static struct platform_driver xsdfec_driver = {
+ .driver = {
+ .name = "xilinx-sdfec",
+ .of_match_table = xsdfec_of_match,
+ },
+ .probe = xsdfec_probe,
+ .remove = xsdfec_remove,
+};
+
+static int __init xsdfec_init_mod(void)
+{
+ int err;
+
+ xsdfec_class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(xsdfec_class)) {
+ err = PTR_ERR(xsdfec_class);
+ pr_err("%s : Unable to register xsdfec class", __func__);
+ return err;
+ }
+
+ err = alloc_chrdev_region(&xsdfec_devt, 0, DRIVER_MAX_DEV, DRIVER_NAME);
+ if (err < 0) {
+ pr_err("%s : Unable to get major number", __func__);
+ goto err_xsdfec_class;
+ }
+
+ err = platform_driver_register(&xsdfec_driver);
+ if (err < 0) {
+ pr_err("%s Unabled to register %s driver", __func__,
+ DRIVER_NAME);
+ goto err_xsdfec_drv;
+ }
+ return 0;
+
+ /* Error Path */
+err_xsdfec_drv:
+ unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
+err_xsdfec_class:
+ class_destroy(xsdfec_class);
+ return err;
+}
+
+static void __exit xsdfec_cleanup_mod(void)
+{
+ platform_driver_unregister(&xsdfec_driver);
+ unregister_chrdev_region(xsdfec_devt, DRIVER_MAX_DEV);
+ class_destroy(xsdfec_class);
+ xsdfec_class = NULL;
+}
+
+module_init(xsdfec_init_mod);
+module_exit(xsdfec_cleanup_mod);
+
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/xilinx_trafgen.c b/drivers/misc/xilinx_trafgen.c
new file mode 100644
index 000000000000..7432be78ee01
--- /dev/null
+++ b/drivers/misc/xilinx_trafgen.c
@@ -0,0 +1,1494 @@
+/*
+ * Xilinx AXI Traffic Generator
+ *
+ * Copyright (C) 2013 - 2014 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for AXI Traffic Generator IP, which is
+ * designed to generate AXI4 traffic which can be used to stress
+ * different modules/interconnect connected in the system. Different
+ * configurable options which are provided through sysfs entries
+ * allow the user to generate a wide variety of traffic based on
+ * their requirements.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hw specific definitions */
+
+/* Internal RAM Offsets */
+#define XTG_PARAM_RAM_OFFSET 0x1000 /* Parameter RAM offset */
+#define XTG_COMMAND_RAM_OFFSET 0x8000 /* Command RAM offset */
+#define XTG_COMMAND_RAM_MSB_OFFSET 0xa000 /* Command RAM MSB offset */
+#define XTG_MASTER_RAM_INIT_OFFSET 0x10000 /* Master RAM initial offset(v1.0) */
+#define XTG_MASTER_RAM_OFFSET 0xc000 /* Master RAM offset */
+#define XTG_WRITE_COMMAND_RAM_OFFSET 0x9000 /* Write Command RAM offset */
+
+/* Register Offsets */
+#define XTG_MCNTL_OFFSET 0x00 /* Master control */
+#define XTG_SCNTL_OFFSET 0x04 /* Slave control */
+#define XTG_ERR_STS_OFFSET 0x08 /* Error status */
+#define XTG_ERR_EN_OFFSET 0x0C /* Error enable */
+#define XTG_MSTERR_INTR_OFFSET 0x10 /* Master error interrupt enable */
+#define XTG_CFG_STS_OFFSET 0x14 /* Config status */
+#define XTG_STREAM_CNTL_OFFSET 0x30 /* Streaming Control */
+#define XTG_STREAM_TL_OFFSET 0x38 /* Streaming Transfer Length */
+#define XTG_STATIC_CNTL_OFFSET 0x60 /* Static Control */
+#define XTG_STATIC_LEN_OFFSET 0x64 /* Static Length */
+
+/* Register Bitmasks/shifts */
+
+/* Master logic enable */
+#define XTG_MCNTL_MSTEN_MASK 0x00100000
+/* Loop enable */
+#define XTG_MCNTL_LOOPEN_MASK 0x00080000
+/* Slave error interrupt enable */
+#define XTG_SCNTL_ERREN_MASK 0x00008000
+/* Master complete interrupt enable */
+#define XTG_ERR_EN_MSTIRQEN_MASK 0x80000000
+/* Master error interrupt enable */
+#define XTG_MSTERR_INTR_MINTREN_MASK 0x00008000
+/* Master complete done status */
+#define XTG_ERR_STS_MSTDONE_MASK 0x80000000
+/* Error mask for error status/enable registers */
+#define XTG_ERR_ALL_ERRS_MASK 0x801F0003
+/* Core Revision shift */
+#define XTG_MCNTL_REV_SHIFT 24
+
+/* Axi Traffic Generator Command RAM Entry field mask/shifts */
+
+/* Command RAM entry masks */
+#define XTG_LEN_MASK 0xFF /* Driven to a*_len line */
+#define XTG_LOCK_MASK 0x1 /* Driven to a*_lock line */
+#define XTG_BURST_MASK 0x3 /* Driven to a*_burst line */
+#define XTG_SIZE_MASK 0x7 /* Driven to a*_size line */
+#define XTG_ID_MASK 0x1F /* Driven to a*_id line */
+#define XTG_PROT_MASK 0x7 /* Driven to a*_prot line */
+#define XTG_LAST_ADDR_MASK 0x7 /* Last address */
+#define XTG_VALID_CMD_MASK 0x1 /* Valid Command */
+#define XTG_MSTRAM_INDEX_MASK 0x1FFF /* Master RAM Index */
+#define XTG_OTHER_DEPEND_MASK 0x1FF /* Other depend Command no */
+#define XTG_MY_DEPEND_MASK 0x1FF /* My depend command no */
+#define XTG_QOS_MASK 0xF /* Driven to a*_qos line */
+#define XTG_USER_MASK 0xFF /* Driven to a*_user line */
+#define XTG_CACHE_MASK 0xF /* Driven to a*_cache line */
+#define XTG_EXPECTED_RESP_MASK 0x7 /* Expected response */
+
+/* Command RAM entry shift values */
+#define XTG_LEN_SHIFT 0 /* Driven to a*_len line */
+#define XTG_LOCK_SHIFT 8 /* Driven to a*_lock line */
+#define XTG_BURST_SHIFT 10 /* Driven to a*_burst line */
+#define XTG_SIZE_SHIFT 12 /* Driven to a*_size line */
+#define XTG_ID_SHIFT 15 /* Driven to a*_id line */
+#define XTG_PROT_SHIFT 21 /* Driven to a*_prot line */
+#define XTG_LAST_ADDR_SHIFT 28 /* Last address */
+#define XTG_VALID_CMD_SHIFT 31 /* Valid Command */
+#define XTG_MSTRAM_INDEX_SHIFT 0 /* Master RAM Index */
+#define XTG_OTHER_DEPEND_SHIFT 13 /* Other depend cmd num */
+#define XTG_MY_DEPEND_SHIFT 22 /* My depend cmd num */
+#define XTG_QOS_SHIFT 16 /* Driven to a*_qos line */
+#define XTG_USER_SHIFT 5 /* Driven to a*_user line */
+#define XTG_CACHE_SHIFT 4 /* Driven to a*_cache line */
+#define XTG_EXPECTED_RESP_SHIFT 0 /* Expected response */
+
+/* Axi Traffic Generator Parameter RAM Entry field mask/shifts */
+
+/* Parameter RAM Entry field shift values */
+#define XTG_PARAM_ADDRMODE_SHIFT 24 /* Address mode */
+#define XTG_PARAM_INTERVALMODE_SHIFT 26 /* Interval mode */
+#define XTG_PARAM_IDMODE_SHIFT 28 /* Id mode */
+#define XTG_PARAM_OP_SHIFT 29 /* Opcode */
+
+/* PARAM RAM Opcode shift values */
+#define XTG_PARAM_COUNT_SHIFT 0 /* Repeat/Delay count */
+#define XTG_PARAM_DELAYRANGE_SHIFT 0 /* Delay range */
+#define XTG_PARAM_DELAY_SHIFT 8 /* FIXED RPT delay count */
+#define XTG_PARAM_ADDRRANGE_SHIFT 20 /* Address range */
+
+/* Parameter RAM Entry field mask values */
+#define XTG_PARAM_ADDRMODE_MASK 0x3 /* Address mode */
+#define XTG_PARAM_INTERVALMODE_MASK 0x3 /* Interval mode */
+#define XTG_PARAM_IDMODE_MASK 0x1 /* Id mode */
+#define XTG_PARAM_OP_MASK 0x7 /* Opcode */
+
+/* PARAM RAM Opcode mask values */
+#define XTG_PARAM_COUNT_MASK 0xFFFFFF /* Repeat/Delay count */
+#define XTG_PARAM_DELAYRANGE_MASK 0xFF /* Delay range */
+#define XTG_PARAM_DELAY_MASK 0xFFF /* FIXED RPT delay count */
+#define XTG_PARAM_ADDRRANGE_MASK 0xF /* Address range */
+
+/* PARAM RAM Opcode values */
+#define XTG_PARAM_OP_NOP 0x0 /* NOP mode */
+#define XTG_PARAM_OP_RPT 0x1 /* Repeat mode */
+#define XTG_PARAM_OP_DELAY 0x2 /* Delay mode */
+#define XTG_PARAM_OP_FIXEDRPT 0x3 /* Fixed repeat delay */
+
+/* Axi Traffic Generator Static Mode masks */
+#define XTG_STATIC_CNTL_TD_MASK 0x00000002 /* Transfer Done Mask */
+#define XTG_STATIC_CNTL_STEN_MASK 0x00000001 /* Static Enable Mask */
+#define XTG_STATIC_CNTL_RESET_MASK 0x00000000 /* Static Reset Mask */
+
+/* Axi Traffic Generator Stream Mode mask/shifts */
+#define XTG_STREAM_CNTL_STEN_MASK 0x00000001 /* Stream Enable Mask */
+#define XTG_STREAM_TL_TCNT_MASK 0xFFFF0000 /* Transfer Count Mask */
+#define XTG_STREAM_TL_TLEN_MASK 0x0000FFFF /* Transfer Length Mask */
+#define XTG_STREAM_TL_TCNT_SHIFT 16 /* Transfer Count Shift */
+
+/* Driver Specific Definitions */
+
+#define MAX_NUM_ENTRIES 256 /* Number of command entries per region */
+
+#define VALID_SIG 0xa5a5a5a5 /* Valid unique identifier */
+
+/* Internal RAM Sizes */
+#define XTG_PRM_RAM_BLOCK_SIZE 0x400 /* PRAM Block size (1KB) */
+#define XTG_CMD_RAM_BLOCK_SIZE 0x1000 /* CRAM Block size (4KB) */
+#define XTG_EXTCMD_RAM_BLOCK_SIZE 0x400 /* Extended CMDRAM block size (1KB) */
+#define XTG_PARAM_RAM_SIZE 0x800 /* Parameter RAM (2KB) */
+#define XTG_COMMAND_RAM_SIZE 0x2000 /* Command RAM (8KB) */
+#define XTG_EXTCMD_RAM_SIZE 0x800 /* Command RAM (2KB) */
+#define XTG_MASTER_RAM_SIZE 0x2000 /* Master RAM (8KB) */
+
+/* RAM Access Flags */
+#define XTG_READ_RAM 0x0 /* Read RAM flag */
+#define XTG_WRITE_RAM 0x1 /* Write RAM flag */
+#define XTG_WRITE_RAM_ZERO 0x2 /* Write Zero flag */
+
+/* Bytes per entry */
+#define XTG_CRAM_BYTES_PER_ENTRY 16 /* CRAM bytes per entry */
+#define XTG_PRAM_BYTES_PER_ENTRY 4 /* PRAM bytes per entry */
+
+/* Interrupt Definitions */
+#define XTG_MASTER_CMP_INTR 0x1 /* Master complete intr flag */
+#define XTG_MASTER_ERR_INTR 0x2 /* Master error intr flag */
+#define XTG_SLAVE_ERR_INTR 0x4 /* Slave error intr flag */
+
+/*
+ * Version value of the trafgen core.
+ * For the initial IP release (v1.0) the version value is 0x47.
+ * From v2.0 onwards the value encodes the version directly,
+ * for example v2.1 -> 0x21, v2.2 -> 0x22, and so on.
+ */
+#define XTG_INIT_VERSION 0x47 /* Trafgen initial version(v1.0) */
+
+/* Macro */
+#define to_xtg_dev_info(n) ((struct xtg_dev_info *)dev_get_drvdata(n))
+
+#define CMD_WDS 0x4 /* No of words in command ram per command */
+#define EXT_WDS 0x1 /* No of words in extended ram per command */
+#define MSB_INDEX 0x4
+/**
+ * struct xtg_cram - Command RAM structure
+ * @addr: Address Driven to a*_addr line
+ * @valid_cmd: Valid Command
+ * @last_addr: Last address
+ * @prot: Driven to a*_prot line
+ * @id: Driven to a*_id line
+ * @size: Driven to a*_size line
+ * @burst: Driven to a*_burst line
+ * @lock: Driven to a*_lock line
+ * @length: Driven to a*_len line
+ * @my_dpnd: My Depend command number
+ * @other_dpnd: Other depend command number
+ * @mram_idx: Master RAM index
+ * @qos: Driven to a*_qos line
+ * @user: Driven to a*_user line
+ * @cache: Driven to a*_cache line
+ * @expected_resp: Expected response
+ * @index: Command Index
+ * @is_write_block: Write/Read block
+ * @is_valid_req: Unique signature
+ *
+ * FIXME: This structure is shared with the user application and
+ * hence needs to be kept in sync. Structures like this should not
+ * be defined in the driver; this needs to be fixed once a proper
+ * placeholder (in uapi/) is found.
+ */
+struct xtg_cram {
+ phys_addr_t addr;
+ u32 valid_cmd;
+ u32 last_addr;
+ u32 prot;
+ u32 id;
+ u32 size;
+ u32 burst;
+ u32 lock;
+ u32 length;
+ u32 my_dpnd;
+ u32 other_dpnd;
+ u32 mram_idx;
+ u32 qos;
+ u32 user;
+ u32 cache;
+ u32 expected_resp;
+ u16 index;
+ bool is_write_block;
+ u32 is_valid_req;
+};
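+
+/*
+ * Illustrative sketch, not part of this patch: since the FIXME above notes
+ * that this structure is shared with user space, a client would program a
+ * single command entry by writing a struct of exactly this layout to the
+ * "command_ram" binary sysfs file (handled by xtg_cram_write() below). The
+ * sysfs path and the target address are hypothetical:
+ *
+ *	struct xtg_cram cmd = {
+ *		.addr = 0x10000000,	// example target address
+ *		.length = 7,		// 8-beat burst, driven to a*_len
+ *		.burst = 1,		// INCR
+ *		.size = 2,		// 4 bytes per beat
+ *		.valid_cmd = 1,
+ *		.index = 0,
+ *		.is_write_block = true,
+ *		.is_valid_req = VALID_SIG,
+ *	};
+ *	int fd = open("/sys/.../command_ram", O_WRONLY);
+ *
+ *	write(fd, &cmd, sizeof(cmd));
+ */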
+
+/**
+ * struct xtg_pram - Parameter RAM structure
+ * @op_cntl0: Control field 0
+ * @op_cntl1: Control field 1
+ * @op_cntl2: Control field 2
+ * @addr_mode: Address mode
+ * @interval_mode: Interval mode
+ * @id_mode: Id mode
+ * @opcode: Opcode
+ * @index: Command Index
+ * @is_write_block: Write/Read block
+ * @is_valid_req: Unique signature
+ *
+ * FIXME: This structure is shared with the user application and
+ * hence needs to be kept in sync. Structures like this should not
+ * be defined in the driver; this needs to be fixed once a proper
+ * placeholder (in uapi/) is found.
+ */
+struct xtg_pram {
+ u32 op_cntl0;
+ u32 op_cntl1;
+ u32 op_cntl2;
+ u32 addr_mode;
+ u32 interval_mode;
+ u32 id_mode;
+ u32 opcode;
+ u16 index;
+ bool is_write_block;
+ u32 is_valid_req;
+};
+
+/**
+ * struct xtg_dev_info - Global Driver structure
+ * @regs: Iomapped base address
+ * @dev: Device structure
+ * @phys_base_addr: Physical base address
+ * @last_rd_valid_idx: Last Read Valid Command Index
+ * @last_wr_valid_idx: Last Write Valid Command Index
+ * @id: Device instance id
+ * @xtg_mram_offset: MasterRam offset
+ * @clk: Input clock
+ */
+struct xtg_dev_info {
+ void __iomem *regs;
+ struct device *dev;
+ phys_addr_t phys_base_addr;
+ s16 last_rd_valid_idx;
+ s16 last_wr_valid_idx;
+ u32 id;
+ u32 xtg_mram_offset;
+ struct clk *clk;
+};
+
+/**
+ * enum xtg_sysfs_ioctl_opcode - Ioctl opcodes
+ * @XTG_GET_MASTER_CMP_STS: get master complete status
+ * @XTG_GET_SLV_CTRL_REG: get slave control reg status
+ * @XTG_GET_ERR_STS: get error status
+ * @XTG_GET_CFG_STS: get config status
+ * @XTG_GET_LAST_VALID_INDEX: get last valid index
+ * @XTG_GET_DEVICE_ID: get device id
+ * @XTG_GET_RESOURCE: get resource
+ * @XTG_GET_STATIC_ENABLE: get static mode traffic generation state
+ * @XTG_GET_STATIC_BURSTLEN: get static mode burst length
+ * @XTG_GET_STATIC_TRANSFERDONE: get static transfer done
+ * @XTG_GET_STREAM_ENABLE: get stream mode traffic generation state
+ * @XTG_GET_STREAM_TRANSFERLEN: get streaming mode transfer length
+ * @XTG_GET_STREAM_TRANSFERCNT: get streaming mode transfer count
+ * @XTG_GET_MASTER_LOOP_EN: get master loop enable status
+ * @XTG_START_MASTER_LOGIC: start master logic
+ * @XTG_SET_SLV_CTRL_REG: set slave control
+ * @XTG_CLEAR_ERRORS: clear errors
+ * @XTG_ENABLE_ERRORS: enable errors
+ * @XTG_ENABLE_INTRS: enable interrupts
+ * @XTG_CLEAR_MRAM: clear master ram
+ * @XTG_CLEAR_CRAM: clear command ram
+ * @XTG_CLEAR_PRAM: clear parameter ram
+ * @XTG_SET_STATIC_ENABLE: enable static mode traffic generation
+ * @XTG_SET_STATIC_DISABLE: disable static mode traffic generation
+ * @XTG_SET_STATIC_BURSTLEN: set static mode burst length
+ * @XTG_SET_STATIC_TRANSFERDONE: set static transfer done
+ * @XTG_SET_STREAM_ENABLE: enable streaming mode traffic generation
+ * @XTG_SET_STREAM_DISABLE: disable streaming mode traffic generation
+ * @XTG_SET_STREAM_TRANSFERLEN: set streaming mode transfer length
+ * @XTG_SET_STREAM_TRANSFERCNT: set streaming mode transfer count
+ * @XTG_MASTER_LOOP_EN: enable master loop
+ */
+enum xtg_sysfs_ioctl_opcode {
+ XTG_GET_MASTER_CMP_STS,
+ XTG_GET_SLV_CTRL_REG,
+ XTG_GET_ERR_STS,
+ XTG_GET_CFG_STS,
+ XTG_GET_LAST_VALID_INDEX,
+ XTG_GET_DEVICE_ID,
+ XTG_GET_RESOURCE,
+ XTG_GET_STATIC_ENABLE,
+ XTG_GET_STATIC_BURSTLEN,
+ XTG_GET_STATIC_TRANSFERDONE,
+ XTG_GET_STREAM_ENABLE,
+ XTG_GET_STREAM_TRANSFERLEN,
+ XTG_GET_MASTER_LOOP_EN,
+ XTG_GET_STREAM_TRANSFERCNT,
+ XTG_START_MASTER_LOGIC,
+ XTG_SET_SLV_CTRL_REG,
+ XTG_CLEAR_ERRORS,
+ XTG_ENABLE_ERRORS,
+ XTG_ENABLE_INTRS,
+ XTG_CLEAR_MRAM,
+ XTG_CLEAR_CRAM,
+ XTG_CLEAR_PRAM,
+ XTG_SET_STATIC_ENABLE,
+ XTG_SET_STATIC_DISABLE,
+ XTG_SET_STATIC_BURSTLEN,
+ XTG_SET_STATIC_TRANSFERDONE,
+ XTG_SET_STREAM_ENABLE,
+ XTG_SET_STREAM_DISABLE,
+ XTG_SET_STREAM_TRANSFERLEN,
+ XTG_SET_STREAM_TRANSFERCNT,
+ XTG_MASTER_LOOP_EN
+};
+
+/**
+ * xtg_access_rams - Write/Read Master/Command/Parameter RAM
+ * @tg: Pointer to xtg_dev_info structure
+ * @where: Offset from base
+ * @count: Number of bytes to write/read
+ * @flags: Read/Write/Write Zero
+ * @data: Data pointer
+ */
+static void xtg_access_rams(struct xtg_dev_info *tg, int where,
+ int count, int flags, u32 *data)
+{
+ u32 index;
+
+ switch (flags) {
+ case XTG_WRITE_RAM_ZERO:
+ memset_io(tg->regs + where, 0, count);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ writel(0x0, tg->regs + where +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ case XTG_WRITE_RAM:
+ for (index = 0; count > 0; index++, count -= 4)
+ writel(data[index], tg->regs + where + index * 4);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+		/*
+		 * This additional logic is required only for the command
+		 * RAM. When writing to the READ command RAM, write the
+		 * higher address word to the READ address RAM.
+		 */
+ if ((where >= XTG_COMMAND_RAM_OFFSET) &&
+ (where < XTG_WRITE_COMMAND_RAM_OFFSET))
+ writel(data[MSB_INDEX], tg->regs + XTG_COMMAND_RAM_OFFSET +
+ (where - XTG_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET));
+		/*
+		 * When writing to the WRITE command RAM, write the higher
+		 * address word to the WRITE address RAM.
+		 */
+ if ((where >= XTG_WRITE_COMMAND_RAM_OFFSET) &&
+ (where < XTG_COMMAND_RAM_MSB_OFFSET))
+ writel(data[MSB_INDEX], tg->regs +
+ XTG_WRITE_COMMAND_RAM_OFFSET +
+ (where - XTG_WRITE_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ case XTG_READ_RAM:
+ for (index = 0; count > 0; index++, count -= 4)
+ data[index] = readl(tg->regs + where + index * 4);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if ((where >= XTG_COMMAND_RAM_OFFSET) &&
+ (where < XTG_WRITE_COMMAND_RAM_OFFSET))
+ data[MSB_INDEX] = readl(tg->regs + XTG_COMMAND_RAM_OFFSET +
+ (where - XTG_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET));
+
+ if ((where >= XTG_WRITE_COMMAND_RAM_OFFSET) &&
+ (where < XTG_COMMAND_RAM_MSB_OFFSET))
+ data[MSB_INDEX] = readl(tg->regs +
+ XTG_WRITE_COMMAND_RAM_OFFSET +
+ (where - XTG_WRITE_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ }
+}
+
+/**
+ * xtg_prepare_cmd_words - Prepares all four Command RAM words
+ * @tg: Pointer to xtg_dev_info structure
+ * @cmdp: Pointer to xtg_cram structure
+ * @cmd_words: Pointer to Command Words that needs to be prepared
+ */
+static void xtg_prepare_cmd_words(struct xtg_dev_info *tg,
+ const struct xtg_cram *cmdp, u32 *cmd_words)
+{
+ /* Command Word 0 */
+ cmd_words[0] = lower_32_bits(cmdp->addr);
+
+ /* Command Word 4 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ cmd_words[MSB_INDEX] = upper_32_bits(cmdp->addr);
+#endif
+
+ /* Command Word 1 */
+ cmd_words[1] = 0;
+ cmd_words[1] |= (cmdp->length & XTG_LEN_MASK) << XTG_LEN_SHIFT;
+ cmd_words[1] |= (cmdp->lock & XTG_LOCK_MASK) << XTG_LOCK_SHIFT;
+ cmd_words[1] |= (cmdp->burst & XTG_BURST_MASK) << XTG_BURST_SHIFT;
+ cmd_words[1] |= (cmdp->size & XTG_SIZE_MASK) << XTG_SIZE_SHIFT;
+ cmd_words[1] |= (cmdp->id & XTG_ID_MASK) << XTG_ID_SHIFT;
+ cmd_words[1] |= (cmdp->prot & XTG_PROT_MASK) << XTG_PROT_SHIFT;
+ cmd_words[1] |= (cmdp->last_addr & XTG_LAST_ADDR_MASK) <<
+ XTG_LAST_ADDR_SHIFT;
+ cmd_words[1] |= (cmdp->valid_cmd & XTG_VALID_CMD_MASK) <<
+ XTG_VALID_CMD_SHIFT;
+
+ /* Command Word 2 */
+ cmd_words[2] = 0;
+ cmd_words[2] |= (cmdp->mram_idx & XTG_MSTRAM_INDEX_MASK) <<
+ XTG_MSTRAM_INDEX_SHIFT;
+ cmd_words[2] |= (cmdp->other_dpnd & XTG_OTHER_DEPEND_MASK) <<
+ XTG_OTHER_DEPEND_SHIFT;
+ cmd_words[2] |= (cmdp->my_dpnd & XTG_MY_DEPEND_MASK) <<
+ XTG_MY_DEPEND_SHIFT;
+
+ /* Command Word 3 */
+ cmd_words[3] = 0;
+ cmd_words[3] |= (cmdp->qos & XTG_QOS_MASK) << XTG_QOS_SHIFT;
+ cmd_words[3] |= (cmdp->user & XTG_USER_MASK) << XTG_USER_SHIFT;
+ cmd_words[3] |= (cmdp->cache & XTG_CACHE_MASK) << XTG_CACHE_SHIFT;
+ cmd_words[3] |= (cmdp->expected_resp & XTG_EXPECTED_RESP_MASK) <<
+ XTG_EXPECTED_RESP_SHIFT;
+}
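+
+/*
+ * Worked example, derived from the mask/shift definitions above: a command
+ * with length = 0x07, burst = 0x1 (INCR), size = 0x2 (4 bytes per beat)
+ * and valid_cmd = 1 packs command word 1 as
+ *
+ *	(0x07 << 0) | (0x1 << 10) | (0x2 << 12) | (0x1 << 31)
+ *	= 0x00000007 | 0x00000400 | 0x00002000 | 0x80000000
+ *	= 0x80002407
+ */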
+
+/**
+ * xtg_prepare_param_word - Prepares Parameter RAM word
+ * @tg: Pointer to xtg_dev_info structure
+ * @cmdp: Pointer to xtg_pram structure
+ * @param_word: Pointer to Param Word that needs to be prepared
+ */
+static void xtg_prepare_param_word(struct xtg_dev_info *tg,
+ const struct xtg_pram *cmdp, u32 *param_word)
+{
+ *param_word = 0;
+ *param_word |= (cmdp->opcode & XTG_PARAM_OP_MASK) << XTG_PARAM_OP_SHIFT;
+ *param_word |= (cmdp->addr_mode & XTG_PARAM_ADDRMODE_MASK) <<
+ XTG_PARAM_ADDRMODE_SHIFT;
+ *param_word |= (cmdp->id_mode & XTG_PARAM_IDMODE_MASK) <<
+ XTG_PARAM_IDMODE_SHIFT;
+ *param_word |= (cmdp->interval_mode & XTG_PARAM_INTERVALMODE_MASK) <<
+ XTG_PARAM_INTERVALMODE_SHIFT;
+
+ switch (cmdp->opcode) {
+ case XTG_PARAM_OP_RPT:
+ case XTG_PARAM_OP_DELAY:
+ *param_word |= (cmdp->op_cntl0 & XTG_PARAM_COUNT_MASK) <<
+ XTG_PARAM_COUNT_SHIFT;
+ break;
+
+ case XTG_PARAM_OP_FIXEDRPT:
+ *param_word |= (cmdp->op_cntl0 & XTG_PARAM_ADDRRANGE_MASK) <<
+ XTG_PARAM_ADDRRANGE_SHIFT;
+ *param_word |= (cmdp->op_cntl1 & XTG_PARAM_DELAY_MASK) <<
+ XTG_PARAM_DELAY_SHIFT;
+ *param_word |= (cmdp->op_cntl2 & XTG_PARAM_DELAYRANGE_MASK) <<
+ XTG_PARAM_DELAYRANGE_SHIFT;
+ break;
+
+ case XTG_PARAM_OP_NOP:
+ *param_word = 0;
+ break;
+ }
+}
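+
+/*
+ * Worked example, derived from the definitions above: a repeat-mode entry
+ * (opcode = XTG_PARAM_OP_RPT = 0x1) with a repeat count of 100 and all
+ * mode fields zero packs the parameter word as
+ *
+ *	(0x1 << 29) | (100 << 0) = 0x20000000 | 0x64 = 0x20000064
+ */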
+
+/**
+ * xtg_sysfs_ioctl - Implements sysfs operations
+ * @dev: Device structure
+ * @buf: Value to write
+ * @opcode: Ioctl opcode
+ *
+ * Return: value read for 'get' opcodes, 0 for most 'set' opcodes, or a
+ * negative error code if parsing the input fails.
+ */
+static ssize_t xtg_sysfs_ioctl(struct device *dev, const char *buf,
+ enum xtg_sysfs_ioctl_opcode opcode)
+{
+ struct xtg_dev_info *tg = to_xtg_dev_info(dev);
+ unsigned long wrval;
+ ssize_t status, rdval = 0;
+
+ if (opcode > XTG_GET_STREAM_TRANSFERCNT) {
+ status = kstrtoul(buf, 0, &wrval);
+ if (status < 0)
+ return status;
+ }
+
+ switch (opcode) {
+ case XTG_GET_MASTER_CMP_STS:
+ rdval = (readl(tg->regs + XTG_MCNTL_OFFSET) &
+ XTG_MCNTL_MSTEN_MASK) ? 1 : 0;
+ break;
+
+ case XTG_GET_MASTER_LOOP_EN:
+ rdval = (readl(tg->regs + XTG_MCNTL_OFFSET) &
+ XTG_MCNTL_LOOPEN_MASK) ? 1 : 0;
+ break;
+
+ case XTG_GET_SLV_CTRL_REG:
+ rdval = readl(tg->regs + XTG_SCNTL_OFFSET);
+ break;
+
+ case XTG_GET_ERR_STS:
+ rdval = readl(tg->regs + XTG_ERR_STS_OFFSET) &
+ XTG_ERR_ALL_ERRS_MASK;
+ break;
+
+ case XTG_GET_CFG_STS:
+ rdval = readl(tg->regs + XTG_CFG_STS_OFFSET);
+ break;
+
+ case XTG_GET_LAST_VALID_INDEX:
+ rdval = (((tg->last_wr_valid_idx << 16) & 0xffff0000) |
+ (tg->last_rd_valid_idx & 0xffff));
+ break;
+
+ case XTG_GET_DEVICE_ID:
+ rdval = tg->id;
+ break;
+
+ case XTG_GET_RESOURCE:
+ rdval = (unsigned long)tg->regs;
+ break;
+
+ case XTG_GET_STATIC_ENABLE:
+ rdval = readl(tg->regs + XTG_STATIC_CNTL_OFFSET);
+ break;
+
+ case XTG_GET_STATIC_BURSTLEN:
+ rdval = readl(tg->regs + XTG_STATIC_LEN_OFFSET);
+ break;
+
+ case XTG_GET_STATIC_TRANSFERDONE:
+ rdval = (readl(tg->regs + XTG_STATIC_CNTL_OFFSET) &
+ XTG_STATIC_CNTL_TD_MASK);
+ break;
+
+ case XTG_GET_STREAM_ENABLE:
+ rdval = readl(tg->regs + XTG_STREAM_CNTL_OFFSET);
+ break;
+
+ case XTG_GET_STREAM_TRANSFERLEN:
+ rdval = (readl(tg->regs + XTG_STREAM_TL_OFFSET) &
+ XTG_STREAM_TL_TLEN_MASK);
+ break;
+
+ case XTG_GET_STREAM_TRANSFERCNT:
+ rdval = ((readl(tg->regs + XTG_STREAM_TL_OFFSET) &
+ XTG_STREAM_TL_TCNT_MASK) >>
+ XTG_STREAM_TL_TCNT_SHIFT);
+ break;
+
+ case XTG_START_MASTER_LOGIC:
+ if (wrval)
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) |
+ XTG_MCNTL_MSTEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ break;
+
+ case XTG_MASTER_LOOP_EN:
+ if (wrval)
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) |
+ XTG_MCNTL_LOOPEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ else
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) &
+ ~XTG_MCNTL_LOOPEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ break;
+
+ case XTG_SET_SLV_CTRL_REG:
+ writel(wrval, tg->regs + XTG_SCNTL_OFFSET);
+ break;
+
+ case XTG_ENABLE_ERRORS:
+ wrval &= XTG_ERR_ALL_ERRS_MASK;
+ writel(wrval, tg->regs + XTG_ERR_EN_OFFSET);
+ break;
+
+ case XTG_CLEAR_ERRORS:
+ wrval &= XTG_ERR_ALL_ERRS_MASK;
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) | wrval,
+ tg->regs + XTG_ERR_STS_OFFSET);
+ break;
+
+ case XTG_ENABLE_INTRS:
+ if (wrval & XTG_MASTER_CMP_INTR) {
+ pr_info("Enabling Master Complete Interrupt\n");
+ writel(readl(tg->regs + XTG_ERR_EN_OFFSET) |
+ XTG_ERR_EN_MSTIRQEN_MASK,
+ tg->regs + XTG_ERR_EN_OFFSET);
+ }
+ if (wrval & XTG_MASTER_ERR_INTR) {
+ pr_info("Enabling Interrupt on Master Errors\n");
+ writel(readl(tg->regs + XTG_MSTERR_INTR_OFFSET) |
+ XTG_MSTERR_INTR_MINTREN_MASK,
+ tg->regs + XTG_MSTERR_INTR_OFFSET);
+ }
+ if (wrval & XTG_SLAVE_ERR_INTR) {
+ pr_info("Enabling Interrupt on Slave Errors\n");
+ writel(readl(tg->regs + XTG_SCNTL_OFFSET) |
+ XTG_SCNTL_ERREN_MASK,
+ tg->regs + XTG_SCNTL_OFFSET);
+ }
+ break;
+
+ case XTG_CLEAR_MRAM:
+ xtg_access_rams(tg, tg->xtg_mram_offset,
+ XTG_MASTER_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_CLEAR_CRAM:
+ xtg_access_rams(tg, XTG_COMMAND_RAM_OFFSET,
+ XTG_COMMAND_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_CLEAR_PRAM:
+ xtg_access_rams(tg, XTG_PARAM_RAM_OFFSET,
+ XTG_PARAM_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_SET_STATIC_ENABLE:
+ if (wrval) {
+ wrval &= XTG_STATIC_CNTL_STEN_MASK;
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) | wrval,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ } else {
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) &
+ ~XTG_STATIC_CNTL_STEN_MASK,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ }
+ break;
+
+ case XTG_SET_STATIC_BURSTLEN:
+ writel(wrval, tg->regs + XTG_STATIC_LEN_OFFSET);
+ break;
+
+ case XTG_SET_STATIC_TRANSFERDONE:
+ wrval |= XTG_STATIC_CNTL_TD_MASK;
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) | wrval,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_ENABLE:
+ if (wrval) {
+ rdval = readl(tg->regs + XTG_STREAM_CNTL_OFFSET);
+			rdval |= XTG_STREAM_CNTL_STEN_MASK;
+ writel(rdval,
+ tg->regs + XTG_STREAM_CNTL_OFFSET);
+ } else {
+ writel(readl(tg->regs + XTG_STREAM_CNTL_OFFSET) &
+ ~XTG_STREAM_CNTL_STEN_MASK,
+ tg->regs + XTG_STREAM_CNTL_OFFSET);
+ }
+ break;
+
+ case XTG_SET_STREAM_TRANSFERLEN:
+ wrval &= XTG_STREAM_TL_TLEN_MASK;
+ rdval = readl(tg->regs + XTG_STREAM_TL_OFFSET);
+ rdval &= ~XTG_STREAM_TL_TLEN_MASK;
+ writel(rdval | wrval,
+ tg->regs + XTG_STREAM_TL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_TRANSFERCNT:
+ wrval = ((wrval << XTG_STREAM_TL_TCNT_SHIFT) &
+ XTG_STREAM_TL_TCNT_MASK);
+ rdval = readl(tg->regs + XTG_STREAM_TL_OFFSET);
+ rdval = rdval & ~XTG_STREAM_TL_TCNT_MASK;
+ writel(rdval | wrval,
+ tg->regs + XTG_STREAM_TL_OFFSET);
+ break;
+
+ default:
+ break;
+ }
+
+ return rdval;
+}
+
+/* Sysfs functions */
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_DEVICE_ID);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t resource_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_RESOURCE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t master_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_MASTER_CMP_STS);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t master_start_stop_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_START_MASTER_LOGIC);
+
+ return size;
+}
+static DEVICE_ATTR_RW(master_start_stop);
+
+static ssize_t config_slave_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_SLV_CTRL_REG);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t config_slave_status_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_SLV_CTRL_REG);
+
+ return size;
+}
+static DEVICE_ATTR_RW(config_slave_status);
+
+static ssize_t err_sts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_ERR_STS);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t err_sts_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_ERRORS);
+
+ return size;
+}
+static DEVICE_ATTR_RW(err_sts);
+
+static ssize_t err_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_ENABLE_ERRORS);
+
+ return size;
+}
+static DEVICE_ATTR_WO(err_en);
+
+static ssize_t intr_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_ENABLE_INTRS);
+
+ return size;
+}
+static DEVICE_ATTR_WO(intr_en);
+
+static ssize_t last_valid_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_LAST_VALID_INDEX);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(last_valid_index);
+
+static ssize_t config_sts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_CFG_STS);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(config_sts);
+
+static ssize_t mram_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_MRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(mram_clear);
+
+static ssize_t cram_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_CRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(cram_clear);
+
+static ssize_t pram_clear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+	xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_PRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(pram_clear);
+
+static ssize_t static_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_ENABLE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t static_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_ENABLE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_enable);
+
+static ssize_t static_burstlen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_BURSTLEN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t static_burstlen_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_BURSTLEN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_burstlen);
+
+static ssize_t static_transferdone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_TRANSFERDONE);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t static_transferdone_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_TRANSFERDONE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_transferdone);
+
+static ssize_t reset_static_transferdone_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_TRANSFERDONE);
+
+ if (rdval == XTG_STATIC_CNTL_RESET_MASK)
+ rdval = 1;
+ else
+ rdval = 0;
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+static DEVICE_ATTR_RO(reset_static_transferdone);
+
+static ssize_t stream_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_ENABLE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t stream_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_ENABLE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_enable);
+
+static ssize_t stream_transferlen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TRANSFERLEN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_transferlen_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TRANSFERLEN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_transferlen);
+
+static ssize_t stream_transfercnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TRANSFERCNT);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_transfercnt_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TRANSFERCNT);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_transfercnt);
+
+static ssize_t loop_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_MASTER_LOOP_EN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t loop_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_MASTER_LOOP_EN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(loop_enable);
+
+static ssize_t xtg_pram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ pr_info("No read access to Parameter RAM\n");
+
+ return 0;
+}
+
+static ssize_t xtg_pram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ u32 *data = (u32 *)buf;
+
+ if (off >= XTG_PARAM_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 2K PRAM size\n");
+ return -ENOMEM;
+ }
+
+ if (count >= XTG_PARAM_RAM_SIZE)
+ count = XTG_PARAM_RAM_SIZE;
+
+ /* Program each command */
+ if (count == sizeof(struct xtg_pram)) {
+ struct xtg_pram *cmdp = (struct xtg_pram *)buf;
+ u32 param_word;
+
+ if (!cmdp)
+ return -EINVAL;
+
+ if (cmdp->is_valid_req == VALID_SIG) {
+ /* Prepare parameter word */
+ xtg_prepare_param_word(tg, cmdp, &param_word);
+
+ count = XTG_PRAM_BYTES_PER_ENTRY;
+ data = &param_word;
+
+			/* Valid command indices are 0 to MAX_NUM_ENTRIES - 1 */
+			if (cmdp->index >= MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ /* Calculate the block index */
+ if (cmdp->is_write_block)
+ off = XTG_PRM_RAM_BLOCK_SIZE +
+ cmdp->index * count;
+ else
+ off = cmdp->index * count;
+ }
+ }
+
+ off += XTG_PARAM_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, data);
+
+ return count;
+}
+
+static int xtg_pram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ XTG_PARAM_RAM_OFFSET) >> PAGE_SHIFT,
+ XTG_PARAM_RAM_SIZE, vma->vm_page_prot);
+ return ret;
+}
+
+static struct bin_attribute xtg_pram_attr = {
+ .attr = {
+ .name = "parameter_ram",
+ .mode = 0644,
+ },
+ .size = XTG_PARAM_RAM_SIZE,
+ .read = xtg_pram_read,
+ .write = xtg_pram_write,
+ .mmap = xtg_pram_mmap,
+};
+
+static ssize_t xtg_cram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ off += XTG_COMMAND_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_READ_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static ssize_t xtg_cram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ u32 *data = (u32 *)buf;
+
+ if (off >= XTG_COMMAND_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 8K CRAM size\n");
+ return -ENOMEM;
+ }
+
+ /* Program each command */
+ if (count == sizeof(struct xtg_cram)) {
+ struct xtg_cram *cmdp = (struct xtg_cram *)buf;
+ u32 cmd_words[CMD_WDS + EXT_WDS];
+
+ if (!cmdp)
+ return -EINVAL;
+
+ if (cmdp->is_valid_req == VALID_SIG) {
+ /* Prepare command words */
+ xtg_prepare_cmd_words(tg, cmdp, cmd_words);
+ count = XTG_CRAM_BYTES_PER_ENTRY;
+ data = cmd_words;
+
+			/* Valid command indices are 0 to MAX_NUM_ENTRIES - 1 */
+			if (cmdp->index >= MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ /* Calculate the block index */
+ if (cmdp->is_write_block)
+ off = XTG_CMD_RAM_BLOCK_SIZE +
+ cmdp->index * count;
+ else
+ off = cmdp->index * count;
+
+ /* Store the valid command index */
+ if (cmdp->valid_cmd) {
+ if (cmdp->is_write_block)
+ tg->last_wr_valid_idx =
+ cmdp->index;
+ else
+ tg->last_rd_valid_idx =
+ cmdp->index;
+ }
+ }
+ }
+
+ off += XTG_COMMAND_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, data);
+
+ return count;
+}
+
+static int xtg_cram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ XTG_COMMAND_RAM_OFFSET) >> PAGE_SHIFT,
+ XTG_COMMAND_RAM_SIZE + XTG_EXTCMD_RAM_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+static struct bin_attribute xtg_cram_attr = {
+ .attr = {
+ .name = "command_ram",
+ .mode = 0644,
+ },
+ .size = XTG_COMMAND_RAM_SIZE,
+ .read = xtg_cram_read,
+ .write = xtg_cram_write,
+ .mmap = xtg_cram_mmap,
+};
+
+static ssize_t xtg_mram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ off += tg->xtg_mram_offset;
+ xtg_access_rams(tg, off, count, XTG_READ_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static ssize_t xtg_mram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ if (off >= XTG_MASTER_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 8K MRAM size\n");
+ return -ENOMEM;
+ }
+
+ off += tg->xtg_mram_offset;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static int xtg_mram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ tg->xtg_mram_offset) >> PAGE_SHIFT,
+ XTG_MASTER_RAM_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+static struct bin_attribute xtg_mram_attr = {
+ .attr = {
+ .name = "master_ram",
+ .mode = 0644,
+ },
+ .size = XTG_MASTER_RAM_SIZE,
+ .read = xtg_mram_read,
+ .write = xtg_mram_write,
+ .mmap = xtg_mram_mmap,
+};
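+
+/*
+ * Illustrative sketch, not part of this patch: instead of read()/write(),
+ * user space can map the master RAM directly through the mmap handler
+ * above. The sysfs path is hypothetical:
+ *
+ *	#include <sys/mman.h>
+ *	#include <fcntl.h>
+ *
+ *	int fd = open("/sys/.../master_ram", O_RDWR);
+ *	void *mram = mmap(NULL, 0x2000,		// XTG_MASTER_RAM_SIZE
+ *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ *	if (mram != MAP_FAILED)
+ *		((volatile unsigned int *)mram)[0] = 0xdeadbeef;
+ */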
+
+static struct bin_attribute *xtg_bin_attrs[] = {
+ &xtg_mram_attr,
+ &xtg_pram_attr,
+ &xtg_cram_attr,
+ NULL,
+};
+
+static const struct attribute *xtg_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_master_start_stop.attr,
+ &dev_attr_config_slave_status.attr,
+ &dev_attr_err_en.attr,
+ &dev_attr_err_sts.attr,
+ &dev_attr_intr_en.attr,
+ &dev_attr_last_valid_index.attr,
+ &dev_attr_config_sts.attr,
+ &dev_attr_mram_clear.attr,
+ &dev_attr_cram_clear.attr,
+ &dev_attr_pram_clear.attr,
+ &dev_attr_static_enable.attr,
+ &dev_attr_static_burstlen.attr,
+ &dev_attr_static_transferdone.attr,
+ &dev_attr_stream_transfercnt.attr,
+ &dev_attr_stream_transferlen.attr,
+ &dev_attr_stream_enable.attr,
+ &dev_attr_reset_static_transferdone.attr,
+ &dev_attr_loop_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group xtg_attributes = {
+ .attrs = (struct attribute **)xtg_attrs,
+ .bin_attrs = xtg_bin_attrs,
+};
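+
+/*
+ * Usage sketch; the paths are assumptions based on the attribute names
+ * registered above, and the exact device directory depends on the DT node:
+ *
+ *	# enable all three interrupt sources (0x1 | 0x2 | 0x4)
+ *	echo 7 > /sys/devices/.../intr_en
+ *	# start the programmed traffic
+ *	echo 1 > /sys/devices/.../master_start_stop
+ *	# read back the accumulated error status
+ *	cat /sys/devices/.../err_sts
+ */
+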
+/**
+ * xtg_cmp_intr_handler - Master Complete Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the xtg_dev_info structure
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t xtg_cmp_intr_handler(int irq, void *data)
+{
+ struct xtg_dev_info *tg = (struct xtg_dev_info *)data;
+
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) |
+ XTG_ERR_STS_MSTDONE_MASK,
+ tg->regs + XTG_ERR_STS_OFFSET);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xtg_err_intr_handler - Master/Slave Error Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the xtg_dev_info structure
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t xtg_err_intr_handler(int irq, void *data)
+{
+ struct xtg_dev_info *tg = (struct xtg_dev_info *)data;
+ u32 value;
+
+ value = readl(tg->regs + XTG_ERR_STS_OFFSET) &
+ XTG_ERR_ALL_ERRS_MASK;
+
+ if (value) {
+ dev_err(tg->dev, "Found errors 0x%08x\n", value);
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) | value,
+ tg->regs + XTG_ERR_STS_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xtg_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It does all the memory
+ * allocation and creates the sysfs entries for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xtg_probe(struct platform_device *pdev)
+{
+ struct xtg_dev_info *tg;
+ struct device_node *node;
+ struct resource *res;
+ struct device *dev;
+ int err, irq, var;
+
+ tg = devm_kzalloc(&pdev->dev, sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return -ENOMEM;
+
+ tg->dev = &(pdev->dev);
+ dev = tg->dev;
+ node = pdev->dev.of_node;
+
+ /* Map the registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tg->regs))
+ return PTR_ERR(tg->regs);
+
+ /* Save physical base address */
+ tg->phys_base_addr = res->start;
+
+ /* Get the device instance id */
+ err = of_property_read_u32(node, "xlnx,device-id", &tg->id);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ return err;
+ }
+
+ /* Map the error interrupt, if it exists in the device tree. */
+ irq = platform_get_irq_byname(pdev, "err-out");
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "unable to get err irq");
+ } else {
+ err = devm_request_irq(&pdev->dev, irq, xtg_err_intr_handler,
+ 0, dev_name(&pdev->dev), tg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to request irq %d", irq);
+ return err;
+ }
+ }
+
+ /* Map the completion interrupt, if it exists in the device tree. */
+ irq = platform_get_irq_byname(pdev, "irq-out");
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "unable to get cmp irq");
+ } else {
+ err = devm_request_irq(&pdev->dev, irq, xtg_cmp_intr_handler,
+ 0, dev_name(&pdev->dev), tg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to request irq %d", irq);
+ return err;
+ }
+ }
+
+ tg->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tg->clk)) {
+ if (PTR_ERR(tg->clk) != -ENOENT) {
+ if (PTR_ERR(tg->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
+ return PTR_ERR(tg->clk);
+ }
+ tg->clk = NULL;
+ }
+
+ err = clk_prepare_enable(tg->clk);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return err;
+ }
+
+ /*
+ * Create sysfs file entries for the device
+ */
+ err = sysfs_create_group(&dev->kobj, &xtg_attributes);
+ if (err < 0) {
+ dev_err(tg->dev, "unable to create sysfs entries\n");
+ clk_disable_unprepare(tg->clk);
+ return err;
+ }
+
+	/*
+	 * Initialize the write and read valid index values.
+	 * The possible range of values for these variables is 0 to 255;
+	 * -1 indicates that no valid command has been stored yet.
+	 */
+ tg->last_wr_valid_idx = -1;
+ tg->last_rd_valid_idx = -1;
+
+ dev_set_drvdata(&pdev->dev, tg);
+
+ /* Update the Proper MasterRam offset */
+ tg->xtg_mram_offset = XTG_MASTER_RAM_OFFSET;
+ var = readl(tg->regs + XTG_MCNTL_OFFSET) >> XTG_MCNTL_REV_SHIFT;
+ if (var == XTG_INIT_VERSION)
+ tg->xtg_mram_offset = XTG_MASTER_RAM_INIT_OFFSET;
+
+ dev_info(&pdev->dev, "Probing xilinx traffic generator success\n");
+
+ return 0;
+}
+
+/**
+ * xtg_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function frees all the resources allocated to the device.
+ *
+ * Return: 0 always
+ */
+static int xtg_remove(struct platform_device *pdev)
+{
+ struct xtg_dev_info *tg;
+ struct device *dev;
+
+ tg = dev_get_drvdata(&pdev->dev);
+ dev = tg->dev;
+ sysfs_remove_group(&dev->kobj, &xtg_attributes);
+ clk_disable_unprepare(tg->clk);
+
+ return 0;
+}
+
+static const struct of_device_id xtg_of_match[] = {
+ { .compatible = "xlnx,axi-traffic-gen", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xtg_of_match);
+
+static struct platform_driver xtg_driver = {
+ .driver = {
+ .name = "xilinx-trafgen",
+ .of_match_table = xtg_of_match,
+ },
+ .probe = xtg_probe,
+ .remove = xtg_remove,
+};
+
+module_platform_driver(xtg_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Traffic Generator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index fe914ff5f5d6..128089b9f11c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1020,6 +1020,23 @@ retry:
goto free_card;
/*
+	 * If the card is already operating at 1.8V and the system has no
+	 * mechanism to power cycle the SD card, the card will report no 1.8V
+	 * support in its OCR response. The check below detects whether this
+	 * condition has occurred and sets the rocr flag accordingly.
+ *
+ * If the host is supporting UHS modes and the card is supporting SD
+ * specification 3.0 and above, it can operate at UHS modes.
+ */
+ if (mmc_host_uhs(host) && card->scr.sda_spec3 &&
+ card->sw_caps.sd3_bus_mode >= SD_MODE_UHS_SDR50) {
+ rocr |= SD_ROCR_S18A;
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+ if (err)
+ goto free_card;
+ }
+
+ /*
* If the card has not been power cycled, it may still be using 1.8V
* signaling. Detect that situation and try to initialize a UHS-I (1.8V)
* transfer mode.
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index b12abf9b15f2..c72382d9ea9d 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -18,20 +18,63 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/phy/phy.h>
+#include <linux/mmc/mmc.h>
+#include <linux/soc/xilinx/zynqmp/tap_delays.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include "cqhci.h"
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8
+#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC
+
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
+#define CLK_CTRL_TIMEOUT_SHIFT 16
+#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
+#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+#define SD_CLK_25_MHZ 25000000
+#define SD_CLK_19_MHZ 19000000
+#define MAX_TUNING_LOOP 40
#define PHY_CLK_TOO_SLOW_HZ 400000
+#define SDHCI_ITAPDLY_CHGWIN 0x200
+#define SDHCI_ITAPDLY_ENABLE 0x100
+#define SDHCI_OTAPDLY_ENABLE 0x40
+
+#define SDHCI_ITAPDLYSEL_SD_HSD 0x15
+#define SDHCI_ITAPDLYSEL_SDR25 0x15
+#define SDHCI_ITAPDLYSEL_SDR50 0x0
+#define SDHCI_ITAPDLYSEL_SDR104_B2 0x0
+#define SDHCI_ITAPDLYSEL_SDR104_B0 0x0
+#define SDHCI_ITAPDLYSEL_MMC_HSD 0x15
+#define SDHCI_ITAPDLYSEL_SD_DDR50 0x3D
+#define SDHCI_ITAPDLYSEL_MMC_DDR52 0x12
+#define SDHCI_ITAPDLYSEL_MMC_HS200_B2 0x0
+#define SDHCI_ITAPDLYSEL_MMC_HS200_B0 0x0
+#define SDHCI_OTAPDLYSEL_SD_HSD 0x05
+#define SDHCI_OTAPDLYSEL_SDR25 0x05
+#define SDHCI_OTAPDLYSEL_SDR50 0x03
+#define SDHCI_OTAPDLYSEL_SDR104_B0 0x03
+#define SDHCI_OTAPDLYSEL_SDR104_B2 0x02
+#define SDHCI_OTAPDLYSEL_MMC_HSD 0x06
+#define SDHCI_OTAPDLYSEL_SD_DDR50 0x04
+#define SDHCI_OTAPDLYSEL_MMC_DDR52 0x06
+#define SDHCI_OTAPDLYSEL_MMC_HS200_B0 0x03
+#define SDHCI_OTAPDLYSEL_MMC_HS200_B2 0x02
+
+#define MMC_BANK2 0x2
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -86,6 +129,10 @@ struct sdhci_arasan_data {
struct sdhci_host *host;
struct clk *clk_ahb;
struct phy *phy;
+ u32 mio_bank;
+ u32 device_id;
+ u32 itapdly[MMC_TIMING_MMC_HS400 + 1];
+ u32 otapdly[MMC_TIMING_MMC_HS400 + 1];
bool is_phy_on;
bool has_cqe;
@@ -93,6 +140,8 @@ struct sdhci_arasan_data {
struct clk *sdcardclk;
struct regmap *soc_ctl_base;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
unsigned int quirks; /* Arasan deviations from spec */
@@ -101,6 +150,8 @@ struct sdhci_arasan_data {
/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
* internal clock even when the clock isn't stable */
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
+/* Controller has tap delay setting registers in its local reg space */
+#define SDHCI_ARASAN_TAPDELAY_REG_LOCAL BIT(2)
};
struct sdhci_arasan_of_data {
@@ -164,6 +215,236 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
return ret;
}
+static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u8 deviceid)
+{
+ u16 clk;
+ unsigned long timeout;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Issue DLL Reset */
+ zynqmp_dll_reset(deviceid);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ dev_err(mmc_dev(host->mmc),
+ ": Internal clock never stabilised.\n");
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
+static int arasan_zynqmp_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct mmc_host *mmc = host->mmc;
+ u16 ctrl;
+ int tuning_loop_counter = MAX_TUNING_LOOP;
+ int err = 0;
+ unsigned long flags;
+ unsigned int tuning_count = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+ tuning_count = host->tuning_count;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
+ ctrl |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ mdelay(1);
+
+ arasan_zynqmp_dll_reset(host, sdhci_arasan->device_id);
+
+ /*
+ * As per the Host Controller spec v3.00, tuning command
+ * generates Buffer Read Ready interrupt, so enable that.
+ *
+ * Note: The spec clearly says that when tuning sequence
+ * is being performed, the controller does not generate
+ * interrupts other than Buffer Read Ready interrupt. But
+ * to make sure we don't hit a controller bug, we _only_
+ * enable Buffer Read Ready interrupt here.
+ */
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+
+ /*
+	 * Issue CMD19 repeatedly until Execute Tuning is cleared to 0, the
+	 * loop count reaches 40, or a timeout of 150 ms occurs.
+ */
+ do {
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ cmd.retries = 0;
+ cmd.data = NULL;
+ cmd.mrq = &mrq;
+ cmd.error = 0;
+
+ if (tuning_loop_counter-- == 0)
+ break;
+
+ mrq.cmd = &cmd;
+
+ /*
+		 * In response to CMD19, the card sends a 64-byte tuning
+		 * block to the Host Controller, so the block size is set
+		 * to 64 here. For CMD21 (HS200) on an 8-bit bus the tuning
+		 * block is 128 bytes, handled below.
+ */
+ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
+ SDHCI_BLOCK_SIZE);
+ } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ }
+ } else {
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ }
+
+ /*
+ * The tuning block is sent by the card to the host controller.
+ * So we set the TRNS_READ bit in the Transfer Mode register.
+ * This also takes care of setting DMA Enable and Multi Block
+ * Select in the same register to 0.
+ */
+ sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
+
+ sdhci_send_command(host, &cmd);
+
+ host->cmd = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ /* Wait for Buffer Read Ready interrupt */
+ wait_event_interruptible_timeout(host->buf_ready_int,
+ (host->tuning_done == 1),
+ msecs_to_jiffies(50));
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->tuning_done) {
+ dev_warn(mmc_dev(host->mmc),
+				 "Timeout for Buffer Read Ready interrupt, back to fixed sampling clock\n");
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+
+ err = -EIO;
+ goto out;
+ }
+
+ host->tuning_done = 0;
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ /* eMMC spec does not require a delay between tuning cycles */
+ if (opcode == MMC_SEND_TUNING_BLOCK)
+ mdelay(1);
+ } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
+
+ /*
+ * The Host Driver has exhausted the maximum number of loops allowed,
+ * so use fixed sampling frequency.
+ */
+ if (tuning_loop_counter < 0) {
+ ctrl &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ }
+ if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
+ dev_warn(mmc_dev(host->mmc),
+			 "Tuning failed, back to fixed sampling clock\n");
+ err = -EIO;
+ } else {
+ arasan_zynqmp_dll_reset(host, sdhci_arasan->device_id);
+ }
+
+out:
+ /*
+ * In case tuning fails, host controllers which support
+ * re-tuning can try tuning again at a later time, when the
+ * re-tuning timer expires. So for these controllers, we
+ * return 0. Since there might be other controllers who do not
+ * have this capability, we return error for them.
+ */
+ if (tuning_count)
+ err = 0;
+
+ host->mmc->retune_period = err ? 0 : tuning_count;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return err;
+}
+
+static void __arasan_set_tap_delay(struct sdhci_host *host, u8 itap_delay,
+ u8 otap_delay)
+{
+ u32 regval;
+
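+	/*
+	 * ITAPDLY is programmed inside a "change window": CHGWIN is set
+	 * before the enable and select bits are updated and cleared again
+	 * afterwards, so the delay line switches without glitching the
+	 * sampling clock.
+	 */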
+ if (itap_delay) {
+ regval = sdhci_readl(host, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= SDHCI_ITAPDLY_CHGWIN;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= SDHCI_ITAPDLY_ENABLE;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval |= itap_delay;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ regval &= ~SDHCI_ITAPDLY_CHGWIN;
+ sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+ }
+
+ if (otap_delay) {
+ regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval |= SDHCI_OTAPDLY_ENABLE;
+ sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ regval |= otap_delay;
+ sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+ }
+}
+
+static void arasan_set_tap_delay(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ u8 itap_delay;
+ u8 otap_delay;
+
+ itap_delay = sdhci_arasan->itapdly[host->timing];
+ otap_delay = sdhci_arasan->otapdly[host->timing];
+
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_TAPDELAY_REG_LOCAL)
+ __arasan_set_tap_delay(host, itap_delay, otap_delay);
+ else
+ arasan_zynqmp_set_tap_delay(sdhci_arasan->device_id,
+ itap_delay, otap_delay);
+}
+
static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -204,6 +485,17 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+ /* Set the Input and Output Tap Delays */
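+	/*
+	 * SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN marks controllers that are
+	 * unreliable between 19 MHz and 25 MHz, so a requested 25 MHz clock
+	 * is lowered to 19 MHz before the tap delays are applied.
+	 */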
+ if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN) &&
+ (host->version >= SDHCI_SPEC_300)) {
+ if (clock == SD_CLK_25_MHZ)
+ clock = SD_CLK_19_MHZ;
+ if ((host->timing != MMC_TIMING_LEGACY) &&
+ (host->timing != MMC_TIMING_UHS_SDR12)) {
+ arasan_set_tap_delay(host);
+ }
+ }
+
if (ctrl_phy && sdhci_arasan->is_phy_on) {
phy_power_off(sdhci_arasan->phy);
sdhci_arasan->is_phy_on = false;
@@ -290,7 +582,7 @@ static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-static const struct sdhci_ops sdhci_arasan_ops = {
+static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -302,7 +594,6 @@ static const struct sdhci_ops sdhci_arasan_ops = {
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
.ops = &sdhci_arasan_ops,
- .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC,
@@ -373,6 +664,75 @@ static struct sdhci_arasan_of_data sdhci_arasan_rk3399_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+#ifdef CONFIG_PM
+/**
+ * sdhci_arasan_runtime_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ * Put the device in a low power state.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_arasan_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
+
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return 0;
+}
+
+/**
+ * sdhci_arasan_runtime_resume - Resume method for the driver
+ * @dev: Address of the device structure
+ * Resume operation after suspend.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_arasan_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ ret = clk_enable(sdhci_arasan->clk_ahb);
+ if (ret) {
+ dev_err(dev, "Cannot enable AHB clock.\n");
+ return ret;
+ }
+
+	ret = clk_enable(pltfm_host->clk);
+	if (ret) {
+		dev_err(dev, "Cannot enable SD clock.\n");
+		clk_disable(sdhci_arasan->clk_ahb);
+		return ret;
+	}
+
+ ret = sdhci_runtime_resume_host(host, 0);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ clk_disable(pltfm_host->clk);
+ clk_disable(sdhci_arasan->clk_ahb);
+
+ return ret;
+}
+#endif /* ! CONFIG_PM */
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -465,8 +825,11 @@ static int sdhci_arasan_resume(struct device *dev)
}
#endif /* ! CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
- sdhci_arasan_resume);
+static const struct dev_pm_ops sdhci_arasan_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_arasan_suspend, sdhci_arasan_resume)
+ SET_RUNTIME_PM_OPS(sdhci_arasan_runtime_suspend,
+ sdhci_arasan_runtime_resume, NULL)
+};
static const struct of_device_id sdhci_arasan_of_match[] = {
/* SoC-specific compatible strings w/ soc_ctl_map */
@@ -487,6 +850,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "arasan,sdhci-4.9a",
.data = &sdhci_arasan_data,
},
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -714,6 +1081,181 @@ cleanup:
return ret;
}
+/**
+ * arasan_zynqmp_dt_parse_tap_delays - Read Tap Delay values from DT
+ *
+ * Called at initialization to parse the values of Tap Delays.
+ *
+ * @dev: Pointer to our struct device.
+ */
+static void arasan_zynqmp_dt_parse_tap_delays(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct device_node *np = dev->of_node;
+ u32 *itapdly = sdhci_arasan->itapdly;
+ u32 *otapdly = sdhci_arasan->otapdly;
+ int ret;
+
+ /*
+ * Read Tap Delay values from DT, if the DT does not contain the
+ * Tap Values then use the pre-defined values
+ */
+ ret = of_property_read_u32(np, "xlnx,itap-delay-sd-hsd",
+ &itapdly[MMC_TIMING_SD_HS]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_SD_HS\n");
+ itapdly[MMC_TIMING_SD_HS] = SDHCI_ITAPDLYSEL_SD_HSD;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-sd-hsd",
+ &otapdly[MMC_TIMING_SD_HS]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_SD_HS\n");
+ otapdly[MMC_TIMING_SD_HS] = SDHCI_OTAPDLYSEL_SD_HSD;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-sdr25",
+ &itapdly[MMC_TIMING_UHS_SDR25]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_UHS_SDR25\n");
+ itapdly[MMC_TIMING_UHS_SDR25] = SDHCI_ITAPDLYSEL_SDR25;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-sdr25",
+ &otapdly[MMC_TIMING_UHS_SDR25]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_UHS_SDR25\n");
+ otapdly[MMC_TIMING_UHS_SDR25] = SDHCI_OTAPDLYSEL_SDR25;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-sdr50",
+ &itapdly[MMC_TIMING_UHS_SDR50]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_UHS_SDR50\n");
+ itapdly[MMC_TIMING_UHS_SDR50] = SDHCI_ITAPDLYSEL_SDR50;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-sdr50",
+ &otapdly[MMC_TIMING_UHS_SDR50]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_UHS_SDR50\n");
+ otapdly[MMC_TIMING_UHS_SDR50] = SDHCI_OTAPDLYSEL_SDR50;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-sd-ddr50",
+ &itapdly[MMC_TIMING_UHS_DDR50]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_UHS_DDR50\n");
+ itapdly[MMC_TIMING_UHS_DDR50] = SDHCI_ITAPDLYSEL_SD_DDR50;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-sd-ddr50",
+ &otapdly[MMC_TIMING_UHS_DDR50]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_UHS_DDR50\n");
+ otapdly[MMC_TIMING_UHS_DDR50] = SDHCI_OTAPDLYSEL_SD_DDR50;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-mmc-hsd",
+ &itapdly[MMC_TIMING_MMC_HS]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_MMC_HS\n");
+ itapdly[MMC_TIMING_MMC_HS] = SDHCI_ITAPDLYSEL_MMC_HSD;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-mmc-hsd",
+ &otapdly[MMC_TIMING_MMC_HS]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_MMC_HS\n");
+ otapdly[MMC_TIMING_MMC_HS] = SDHCI_OTAPDLYSEL_MMC_HSD;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-mmc-ddr52",
+ &itapdly[MMC_TIMING_MMC_DDR52]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_MMC_DDR52\n");
+ itapdly[MMC_TIMING_MMC_DDR52] = SDHCI_ITAPDLYSEL_MMC_DDR52;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-mmc-ddr52",
+ &otapdly[MMC_TIMING_MMC_DDR52]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_MMC_DDR52\n");
+ otapdly[MMC_TIMING_MMC_DDR52] = SDHCI_OTAPDLYSEL_MMC_DDR52;
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-sdr104",
+ &itapdly[MMC_TIMING_UHS_SDR104]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_UHS_SDR104\n");
+ if (sdhci_arasan->mio_bank == MMC_BANK2) {
+ itapdly[MMC_TIMING_UHS_SDR104] =
+ SDHCI_ITAPDLYSEL_SDR104_B2;
+ } else {
+ itapdly[MMC_TIMING_UHS_SDR104] =
+ SDHCI_ITAPDLYSEL_SDR104_B0;
+ }
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-sdr104",
+ &otapdly[MMC_TIMING_UHS_SDR104]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_UHS_SDR104\n");
+ if (sdhci_arasan->mio_bank == MMC_BANK2) {
+ otapdly[MMC_TIMING_UHS_SDR104] =
+ SDHCI_OTAPDLYSEL_SDR104_B2;
+ } else {
+ otapdly[MMC_TIMING_UHS_SDR104] =
+ SDHCI_OTAPDLYSEL_SDR104_B0;
+ }
+ }
+
+ ret = of_property_read_u32(np, "xlnx,itap-delay-mmc-hs200",
+ &itapdly[MMC_TIMING_MMC_HS200]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined itapdly for MMC_TIMING_MMC_HS200\n");
+ if (sdhci_arasan->mio_bank == MMC_BANK2) {
+ itapdly[MMC_TIMING_MMC_HS200] =
+ SDHCI_ITAPDLYSEL_MMC_HS200_B2;
+ } else {
+ itapdly[MMC_TIMING_MMC_HS200] =
+ SDHCI_ITAPDLYSEL_MMC_HS200_B0;
+ }
+ }
+
+ ret = of_property_read_u32(np, "xlnx,otap-delay-mmc-hs200",
+ &otapdly[MMC_TIMING_MMC_HS200]);
+ if (ret) {
+ dev_dbg(dev,
+ "Using predefined otapdly for MMC_TIMING_MMC_HS200\n");
+ if (sdhci_arasan->mio_bank == MMC_BANK2) {
+ otapdly[MMC_TIMING_MMC_HS200] =
+ SDHCI_OTAPDLYSEL_MMC_HS200_B2;
+ } else {
+ otapdly[MMC_TIMING_MMC_HS200] =
+ SDHCI_OTAPDLYSEL_MMC_HS200_B0;
+ }
+ }
+}
+
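+/*
+ * Boards may override any of the defaults above from the device tree. An
+ * illustrative fragment (the node label is hypothetical and the values are
+ * examples, not board-specific recommendations):
+ *
+ *	&sdhci1 {
+ *		xlnx,mio_bank = <2>;
+ *		xlnx,itap-delay-sdr104 = <0x0>;
+ *		xlnx,otap-delay-sdr104 = <0x2>;
+ *	};
+ */
+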
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -724,10 +1266,35 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ unsigned int host_quirks2 = 0;
const struct sdhci_arasan_of_data *data;
match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
data = match->data;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-8.9a")) {
+ char *soc_rev;
+
+ /* read Silicon version using nvmem driver */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+ if (PTR_ERR(soc_rev) == -EPROBE_DEFER)
+ /* Do a deferred probe */
+ return -EPROBE_DEFER;
+ else if (IS_ERR(soc_rev))
+ dev_dbg(&pdev->dev, "Error getting silicon version\n");
+
+ /* Set host quirk if the silicon version is v1.0 */
+ if (!IS_ERR(soc_rev) && (*soc_rev == ZYNQMP_SILICON_V1))
+ host_quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+
+		/*
+		 * Free soc_rev only if we got a valid pointer from the nvmem
+		 * driver; calling kfree() on an error pointer would cause a
+		 * kernel panic.
+		 */
+ if (!IS_ERR(soc_rev))
+ kfree(soc_rev);
+ }
+
host = sdhci_pltfm_init(pdev, data->pdata, sizeof(*sdhci_arasan));
if (IS_ERR(host))
@@ -739,6 +1306,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan->soc_ctl_map = data->soc_ctl_map;
+ host->quirks2 |= host_quirks2;
+
node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
if (node) {
sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
@@ -787,6 +1356,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "xlnx,int-clock-stable-broken"))
sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE;
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-8.9a"))
+ sdhci_arasan->quirks |= SDHCI_ARASAN_TAPDELAY_REG_LOCAL;
+
pltfm_host->clk = clk_xin;
if (of_device_is_compatible(pdev->dev.of_node,
@@ -806,6 +1378,48 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto unreg_clk;
}
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-8.9a")) {
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ host->quirks2 |= SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-8.9a") ||
+ of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-8.9a")) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,mio_bank",
+ &sdhci_arasan->mio_bank);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+				"\"xlnx,mio_bank\" property is missing.\n");
+ goto clk_disable_all;
+ }
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,device_id",
+ &sdhci_arasan->device_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+				"\"xlnx,device_id\" property is missing.\n");
+ goto clk_disable_all;
+ }
+
+ arasan_zynqmp_dt_parse_tap_delays(&pdev->dev);
+
+ sdhci_arasan_ops.platform_execute_tuning =
+ arasan_zynqmp_execute_tuning;
+ }
+
+ sdhci_arasan->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(sdhci_arasan->pinctrl)) {
+ sdhci_arasan->pins_default = pinctrl_lookup_state(
+ sdhci_arasan->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(sdhci_arasan->pins_default)) {
+ dev_err(&pdev->dev, "Missing default pinctrl config\n");
+			return PTR_ERR(sdhci_arasan->pins_default);
+ }
+
+ pinctrl_select_state(sdhci_arasan->pinctrl,
+ sdhci_arasan->pins_default);
+ }
+
sdhci_arasan->phy = ERR_PTR(-ENODEV);
if (of_device_is_compatible(pdev->dev.of_node,
"arasan,sdhci-5.1")) {
@@ -838,6 +1452,13 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (ret)
goto err_add_host;
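+	/*
+	 * Runtime PM: mark the device active and enable autosuspend with a
+	 * 2 s delay, but leave runtime PM forbidden by default; user space
+	 * can opt in through the power/control sysfs attribute.
+	 */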
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
return 0;
err_add_host:
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index d268b3b8850a..b7833fa4f0a2 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -102,6 +102,9 @@ void sdhci_get_property(struct platform_device *pdev)
sdhci_get_compatibility(pdev);
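+	/*
+	 * "broken-mmc-highspeed" lets firmware mask the high-speed
+	 * capability on platforms where the HISPD bit misbehaves; the
+	 * quirk is honoured in sdhci_setup_host().
+	 */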
+ if (device_property_present(dev, "broken-mmc-highspeed"))
+ host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
+
device_property_read_u32(dev, "clock-frequency", &pltfm_host->clock);
if (device_property_present(dev, "keep-power-in-suspend"))
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5a8d97a8f1e1..30df594d267a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3965,7 +3965,8 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
mmc->caps &= ~MMC_CAP_CMD23;
- if (host->caps & SDHCI_CAN_DO_HISPD)
+ if ((host->caps & SDHCI_CAN_DO_HISPD) &&
+ !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f69bebe51520..187ef75307e1 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -483,6 +483,8 @@ struct sdhci_host {
* block count.
*/
#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
+/* Broken Clock between 19MHz-25MHz */
+#define SDHCI_QUIRK2_CLOCK_STANDARD_25_BROKEN (1<<19)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index cf426956454c..626321f8ba94 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -198,6 +198,9 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int extendedId1 = 0;
+ int extendedId2 = 0;
+ int extendedId3 = 0;
int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
@@ -222,6 +225,38 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
+ /* Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get device ID cycle 1,2,3 for Numonyx/ST devices */
+ if ((cfi->mfr == CFI_MFR_INTEL || cfi->mfr == CFI_MFR_ST)
+ && ((cfi->id & 0xff) == 0x7e)
+ && (le16_to_cpu(cfi->cfiq->P_ID) == 0x0002)) {
+ extendedId1 = cfi_read_query16(map, base + 0x1 * ofs_factor);
+ extendedId2 = cfi_read_query16(map, base + 0xe * ofs_factor);
+ extendedId3 = cfi_read_query16(map, base + 0xf * ofs_factor);
+ }
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -231,6 +266,16 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
+	/* If the device is an M29EW used in 8-bit mode, adjust buffer size */
+ if ((cfi->cfiq->MaxBufWriteSize > 0x8) && (cfi->mfr == CFI_MFR_INTEL ||
+ cfi->mfr == CFI_MFR_ST) && (extendedId1 == 0x7E) &&
+ (extendedId2 == 0x22 || extendedId2 == 0x23 || extendedId2 == 0x28) &&
+ (extendedId3 == 0x01)) {
+ cfi->cfiq->MaxBufWriteSize = 0x8;
+		pr_warn("Adjusted buffer size on Numonyx flash M29EW family in 8 bit mode\n");
+ }
+
#ifdef DEBUG_CFI
/* Dump the information therein */
print_cfi_ident(cfi->cfiq);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c50888670250..dceeec32a4ca 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -190,6 +190,7 @@ static int m25p_probe(struct spi_mem *spimem)
spi_mem_set_drvdata(spimem, flash);
flash->spimem = spimem;
+ nor->spi = spi;
if (spi->mode & SPI_RX_OCTAL) {
hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 5a711d8beaca..29b69a2f2b0a 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -546,4 +546,18 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
+config MTD_NAND_ARASAN
+	tristate "Support for Arasan NAND flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ help
+	  Enables the driver for the Arasan NAND flash controller found on
+	  Zynq UltraScale+ MPSoC.
+
+config MTD_NAND_PL353
+	tristate "ARM PL353 NAND flash driver"
+ depends on MTD_NAND && ARM
+ depends on PL353_SMC
+ help
+	  Enables NAND flash support via the ARM PrimeCell PL353 Static
+	  Memory Controller.
+
endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index efaf5cd25edc..cc292a592457 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -57,6 +57,8 @@ obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan_nand.o
+obj-$(CONFIG_MTD_NAND_PL353) += pl353_nand.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/arasan_nand.c b/drivers/mtd/nand/raw/arasan_nand.c
new file mode 100644
index 000000000000..58ad872b3476
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan_nand.c
@@ -0,0 +1,1465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2017 Xilinx, Inc.
+ * Author: Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#define EVENT_TIMEOUT_MSEC 1000
+#define ANFC_PM_TIMEOUT 1000 /* ms */
+
+#define PKT_OFST 0x00
+#define PKT_CNT_SHIFT 12
+
+#define MEM_ADDR1_OFST 0x04
+#define MEM_ADDR2_OFST 0x08
+#define PG_ADDR_SHIFT 16
+#define BCH_MODE_SHIFT 25
+#define MEM_ADDR_MASK GENMASK(7, 0)
+#define BCH_MODE_MASK GENMASK(27, 25)
+#define CS_MASK GENMASK(31, 30)
+#define CS_SHIFT 30
+
+#define CMD_OFST 0x0C
+#define ECC_ENABLE BIT(31)
+#define DMA_EN_MASK GENMASK(27, 26)
+#define DMA_ENABLE 0x2
+#define DMA_EN_SHIFT 26
+#define REG_PAGE_SIZE_SHIFT 23
+
+#define PROG_OFST 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_OFST 0x14
+#define INTR_SIG_EN_OFST 0x18
+#define XFER_COMPLETE BIT(2)
+#define READ_READY BIT(1)
+#define WRITE_READY BIT(0)
+#define MBIT_ERROR BIT(3)
+#define EVENT_MASK (XFER_COMPLETE | READ_READY | WRITE_READY | MBIT_ERROR)
+
+#define INTR_STS_OFST 0x1C
+#define READY_STS_OFST 0x20
+#define DMA_ADDR1_OFST 0x24
+#define FLASH_STS_OFST 0x28
+#define DATA_PORT_OFST 0x30
+#define ECC_OFST 0x34
+#define BCH_EN_SHIFT 27
+#define ECC_SIZE_SHIFT 16
+
+#define ECC_ERR_CNT_OFST 0x38
+#define PAGE_ERR_CNT_MASK GENMASK(16, 8)
+#define PKT_ERR_CNT_MASK GENMASK(7, 0)
+
+#define ECC_SPR_CMD_OFST 0x3C
+#define CMD2_SHIFT 8
+#define ADDR_CYCLES_SHIFT 28
+
+#define ECC_ERR_CNT_1BIT_OFST 0x40
+#define ECC_ERR_CNT_2BIT_OFST 0x44
+#define DMA_ADDR0_OFST 0x50
+#define DATA_INTERFACE_OFST 0x6C
+#define ANFC_MAX_CHUNK_SIZE 0x4000
+#define ANFC_MAX_ADDR_CYCLES 7
+
+#define REG_PAGE_SIZE_512 0
+#define REG_PAGE_SIZE_1K 5
+#define REG_PAGE_SIZE_2K 1
+#define REG_PAGE_SIZE_4K 2
+#define REG_PAGE_SIZE_8K 3
+#define REG_PAGE_SIZE_16K 4
+
+#define TEMP_BUF_SIZE 1024
+#define NVDDR_MODE_PACKET_SIZE 8
+#define SDR_MODE_PACKET_SIZE 4
+
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define NVDDR_MODE BIT(9)
+#define NVDDR_TIMING_MODE_SHIFT 3
+
+#define SDR_MODE_DEFLT_FREQ 80000000
+#define COL_ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+struct anfc_op {
+ u32 cmds[4];
+ u32 len;
+ u32 col;
+ u32 row;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct anfc_nand_chip - Defines the nand chip related information
+ * @node: Used to store NAND chips into a list.
+ * @chip: NAND chip information structure.
+ * @strength: BCH or Hamming mode enable/disable.
+ * @ecc_strength: ECC strength code (for 4/8/12/24-bit correction).
+ * @eccval: ECC config value.
+ * @raddr_cycles: Row address cycle information.
+ * @caddr_cycles: Column address cycle information.
+ * @pktsize: Packet size for read / write operation.
+ * @csnum: chipselect number to be used.
+ * @spktsize: Packet size in DDR mode for status operation.
+ * @inftimeval: Data interface and timing mode information.
+ */
+struct anfc_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+ bool strength;
+ u32 ecc_strength;
+ u32 eccval;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ u32 pktsize;
+ int csnum;
+ u32 spktsize;
+ u32 inftimeval;
+};
+
+/**
+ * struct anfc_nand_controller - Defines the Arasan NAND flash controller
+ * driver instance
+ * @controller: base controller structure.
+ * @chips: list of all NAND chips attached to the controller.
+ * @dev: Pointer to the device structure.
+ * @base: Virtual address of the NAND flash device.
+ * @curr_cmd: Current command issued.
+ * @clk_sys: Pointer to the system clock.
+ * @clk_flash: Pointer to the flash clock.
+ * @buf: Buffer used for read/write byte operations.
+ * @irq: irq number.
+ * @csnum: Chip select number currently in use.
+ * @event: Completion event for nand status events.
+ * @status: Status of the flash device.
+ * @prog: Used to initiate controller operations.
+ * @chip_active: Used to check the chip select state, active or not.
+ */
+struct anfc_nand_controller {
+ struct nand_controller controller;
+ struct list_head chips;
+ struct device *dev;
+ void __iomem *base;
+ int curr_cmd;
+ struct clk *clk_sys;
+ struct clk *clk_flash;
+ int irq;
+ int csnum;
+ struct completion event;
+ int status;
+ u32 prog;
+ u8 buf[TEMP_BUF_SIZE];
+ bool chip_active;
+};
+
+static int anfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = nand->ecc.total;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int anfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (section >= nand->ecc.steps)
+ return -ERANGE;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - nand->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops anfc_ooblayout_ops = {
+ .ecc = anfc_ooblayout_ecc,
+ .free = anfc_ooblayout_free,
+};
+
+static inline struct anfc_nand_chip *to_anfc_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anfc_nand_chip, chip);
+}
+
+static inline struct anfc_nand_controller *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct anfc_nand_controller, controller);
+}
+
+static u8 anfc_page(u32 pagesize)
+{
+ switch (pagesize) {
+ case 512:
+ return REG_PAGE_SIZE_512;
+ case 1024:
+ return REG_PAGE_SIZE_1K;
+ case 2048:
+ return REG_PAGE_SIZE_2K;
+ case 4096:
+ return REG_PAGE_SIZE_4K;
+ case 8192:
+ return REG_PAGE_SIZE_8K;
+ case 16384:
+ return REG_PAGE_SIZE_16K;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline void anfc_enable_intrs(struct anfc_nand_controller *nfc, u32 val)
+{
+ writel(val, nfc->base + INTR_STS_EN_OFST);
+ writel(val, nfc->base + INTR_SIG_EN_OFST);
+}
+
+static inline void anfc_config_ecc(struct anfc_nand_controller *nfc, bool on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ if (on)
+ val |= ECC_ENABLE;
+ else
+ val &= ~ECC_ENABLE;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline void anfc_config_dma(struct anfc_nand_controller *nfc, int on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ val &= ~DMA_EN_MASK;
+ if (on)
+ val |= DMA_ENABLE << DMA_EN_SHIFT;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline int anfc_wait_for_event(struct anfc_nand_controller *nfc)
+{
+ return wait_for_completion_timeout(&nfc->event,
+ msecs_to_jiffies(EVENT_TIMEOUT_MSEC));
+}
+
+static inline void anfc_setpktszcnt(struct anfc_nand_controller *nfc,
+ u32 pktsize, u32 pktcount)
+{
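+	/* PKT register: packet size in bits [11:0], packet count above it */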
+ writel(pktsize | (pktcount << PKT_CNT_SHIFT), nfc->base + PKT_OFST);
+}
+
+static inline void anfc_set_eccsparecmd(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip, u8 cmd1,
+ u8 cmd2)
+{
+ writel(cmd1 | (cmd2 << CMD2_SHIFT) |
+ (achip->caddr_cycles << ADDR_CYCLES_SHIFT),
+ nfc->base + ECC_SPR_CMD_OFST);
+}
+
+static void anfc_setpagecoladdr(struct anfc_nand_controller *nfc, u32 page,
+ u16 col)
+{
+ u32 val;
+
+ writel(col | (page << PG_ADDR_SHIFT), nfc->base + MEM_ADDR1_OFST);
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val = (val & ~MEM_ADDR_MASK) |
+ ((page >> PG_ADDR_SHIFT) & MEM_ADDR_MASK);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+}
+
+static void anfc_prepare_cmd(struct anfc_nand_controller *nfc, u8 cmd1,
+ u8 cmd2, u8 dmamode,
+ u32 pagesize, u8 addrcycles)
+{
+ u32 regval;
+
+ regval = cmd1 | (cmd2 << CMD2_SHIFT);
+ if (dmamode)
+ regval |= DMA_ENABLE << DMA_EN_SHIFT;
+ regval |= addrcycles << ADDR_CYCLES_SHIFT;
+ regval |= anfc_page(pagesize) << REG_PAGE_SIZE_SHIFT;
+ writel(regval, nfc->base + CMD_OFST);
+}
+
+static void anfc_rw_dma_op(struct mtd_info *mtd, u8 *buf, int len,
+ bool do_read, u32 prog, int pktcount, int pktsize)
+{
+ dma_addr_t paddr;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 eccintr = 0, dir;
+
+ if (pktsize == 0)
+ pktsize = len;
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (!achip->strength)
+ eccintr = MBIT_ERROR;
+
+ if (do_read)
+ dir = DMA_FROM_DEVICE;
+ else
+ dir = DMA_TO_DEVICE;
+
+ paddr = dma_map_single(nfc->dev, buf, len, dir);
+ if (dma_mapping_error(nfc->dev, paddr)) {
+		dev_err(nfc->dev, "Buffer mapping error\n");
+ return;
+ }
+ writel(paddr, nfc->base + DMA_ADDR0_OFST);
+ writel((paddr >> 32), nfc->base + DMA_ADDR1_OFST);
+ anfc_enable_intrs(nfc, (XFER_COMPLETE | eccintr));
+ writel(prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ dma_unmap_single(nfc->dev, paddr, len, dir);
+}
+
+static void anfc_rw_pio_op(struct mtd_info *mtd, u8 *buf, int len,
+ bool do_read, int prog, int pktcount, int pktsize)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 *bufptr = (u32 *)buf;
+ u32 cnt = 0, intr = 0;
+
+ anfc_config_dma(nfc, 0);
+
+ if (pktsize == 0)
+ pktsize = len;
+
+ anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+ if (!achip->strength)
+ intr = MBIT_ERROR;
+
+ if (do_read)
+ intr |= READ_READY;
+ else
+ intr |= WRITE_READY;
+
+ anfc_enable_intrs(nfc, intr);
+ writel(prog, nfc->base + PROG_OFST);
+ while (cnt < pktcount) {
+ anfc_wait_for_event(nfc);
+ cnt++;
+ if (cnt == pktcount)
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ if (do_read)
+ ioread32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+ pktsize / 4);
+ else
+ iowrite32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+ pktsize / 4);
+ bufptr += (pktsize / 4);
+ if (cnt < pktcount)
+ anfc_enable_intrs(nfc, intr);
+ }
+ anfc_wait_for_event(nfc);
+}
+
+static void anfc_read_data_op(struct nand_chip *chip, u8 *buf, int len,
+ int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
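+	/*
+	 * Buffers outside the linear map (e.g. vmalloc'ed) cannot be mapped
+	 * with dma_map_single(), so fall back to PIO for those.
+	 */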
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+ else
+ anfc_rw_pio_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+}
+
+static void anfc_write_data_op(struct nand_chip *chip, const u8 *buf,
+ int len, int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+ else
+ anfc_rw_pio_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+}
+
+static int anfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u8 *ecc_code = chip->ecc.code_buf;
+ u8 *p;
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int stat = 0, i;
+ u32 ret;
+ unsigned int max_bitflips = 0;
+ u32 eccsteps;
+ u32 one_bit_err = 0, multi_bit_err = 0;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART);
+ anfc_config_ecc(nfc, true);
+ anfc_read_data_op(chip, buf, mtd->writesize,
+ DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+ achip->pktsize);
+
+ if (achip->strength) {
+ /*
+		 * In BCH mode the Arasan NAND controller can correct up to
+		 * 24-bit errors. Beyond that, it cannot even detect errors.
+ */
+ multi_bit_err = readl(nfc->base + ECC_ERR_CNT_OFST);
+ multi_bit_err = ((multi_bit_err & PAGE_ERR_CNT_MASK) >> 8);
+ } else {
+ /*
+		 * In Hamming mode the Arasan NAND controller can correct up
+		 * to 1-bit errors and detect up to 4-bit errors.
+ */
+ one_bit_err = readl(nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ multi_bit_err = readl(nfc->base + ECC_ERR_CNT_2BIT_OFST);
+
+ /* Clear ecc error count register 1Bit, 2Bit */
+ writel(0x0, nfc->base + ECC_ERR_CNT_1BIT_OFST);
+ writel(0x0, nfc->base + ECC_ERR_CNT_2BIT_OFST);
+ }
+
+ anfc_config_ecc(nfc, false);
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ if (multi_bit_err || one_bit_err) {
+ if (!oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ eccsteps = chip->ecc.steps;
+ p = buf;
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes,
+ p += eccsize) {
+ stat = nand_check_erased_ecc_chunk(p,
+ chip->ecc.size,
+ &ecc_code[i],
+ eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (stat < 0) {
+ stat = 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ stat);
+ }
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int anfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDIN, 0);
+ anfc_config_ecc(nfc, true);
+ anfc_write_data_op(chip, buf, mtd->writesize,
+ DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+ achip->pktsize);
+
+ if (oob_required)
+ chip->ecc.write_oob(mtd, chip, page);
+
+ anfc_config_ecc(nfc, false);
+
+ return 0;
+}
+
+static int anfc_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc, int ecc_mode)
+{
+ u32 ecc_addr;
+ unsigned int ecc_strength, steps;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ ecc->mode = NAND_ECC_HW;
+ ecc->read_page = anfc_read_page_hwecc;
+ ecc->write_page = anfc_write_page_hwecc;
+
+ mtd_set_ooblayout(mtd, &anfc_ooblayout_ops);
+
+ steps = mtd->writesize / chip->ecc_step_ds;
+
+ switch (chip->ecc_strength_ds) {
+ case 12:
+ ecc_strength = 0x1;
+ break;
+ case 8:
+ ecc_strength = 0x2;
+ break;
+ case 4:
+ ecc_strength = 0x3;
+ break;
+ case 24:
+ ecc_strength = 0x4;
+ break;
+ default:
+ ecc_strength = 0x0;
+ }
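+
+	/*
+	 * Hamming (ecc_strength == 0) uses 3 ECC bytes per 512-byte step.
+	 * BCH needs strength * m parity bits per step, where m is the
+	 * Galois field order covering the step (fls(8 * step_size)),
+	 * rounded up to whole bytes across all steps.
+	 */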
+ if (!ecc_strength)
+ ecc->total = 3 * steps;
+ else
+ ecc->total =
+ DIV_ROUND_UP(fls(8 * chip->ecc_step_ds) *
+ chip->ecc_strength_ds * steps, 8);
+
+ ecc->strength = chip->ecc_strength_ds;
+ ecc->size = chip->ecc_step_ds;
+ ecc->bytes = ecc->total / steps;
+ ecc->steps = steps;
+ achip->ecc_strength = ecc_strength;
+ achip->strength = achip->ecc_strength;
+ ecc_addr = mtd->writesize + (mtd->oobsize - ecc->total);
+ achip->eccval = ecc_addr | (ecc->total << ECC_SIZE_SHIFT) |
+ (achip->strength << BCH_EN_SHIFT);
+
+ if (chip->ecc_step_ds >= 1024)
+ achip->pktsize = 1024;
+ else
+ achip->pktsize = 512;
+
+ return 0;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id;
+ int i = 0;
+
+ memset(nfc_op, 0, sizeof(struct anfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs;
+
+ instr = &subop->instrs[op_id];
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (op_id)
+ nfc_op->cmds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmds[0] = instr->ctx.cmd.opcode;
+ nfc->curr_cmd = nfc_op->cmds[0];
+
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ i = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+
+ for (; i < naddrs; i++) {
+ u8 val = instr->ctx.addr.addrs[i];
+
+ if (nfc_op->cmds[0] == NAND_CMD_ERASE1) {
+ nfc_op->row |= COL_ROW_ADDR(i, val);
+ } else {
+ if (i < 2)
+ nfc_op->col |= COL_ROW_ADDR(i,
+ val);
+ else
+ nfc_op->row |= COL_ROW_ADDR(i -
+ 2, val);
+ }
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ break;
+ }
+ }
+}
+
+static int anfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_op nfc_op = {};
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+	 * Do not execute commands other than NAND_CMD_RESET here; other
+	 * commands have their own patterns. If no pattern matches, the
+	 * controller does not support that operation.
+ */
+ if (nfc_op.cmds[0] != NAND_CMD_RESET)
+ return 0;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 0);
+ nfc->prog = PROG_RST;
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ return 0;
+}
+
+static int anfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_RDID;
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_RDID, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 0);
+ anfc_setpktszcnt(nfc, achip->spktsize / 4, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_STATUS;
+
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ /*
+	 * The Arasan NAND controller updates the status value returned
+	 * by the flash device in the FLASH_STS register.
+ */
+ nfc->status = readl(nfc->base + FLASH_STS_OFST);
+ memcpy(instr->ctx.data.buf.in, &nfc->status, len);
+
+ return 0;
+}
+
+static int anfc_erase_and_zero_len_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 dma_mode = 0, write_size = 0, addrcycles = 0, len, op_id;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ nfc->prog = PROG_ERASE;
+ addrcycles = achip->raddr_cycles;
+ write_size = 0;
+ dma_mode = 0;
+ nfc_op.col = nfc_op.row & 0xffff;
+ nfc_op.row = (nfc_op.row >> PG_ADDR_SHIFT) & 0xffff;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], dma_mode,
+ write_size, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ }
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_read_data_op(chip, instr->ctx.data.buf.in, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_read_param_get_feature_sp_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 dma_mode, addrcycles, write_size;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_PARAM) {
+ nfc->prog = PROG_RDPARAM;
+ dma_mode = 0;
+ addrcycles = 1;
+ write_size = 0;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_GET_FEATURES) {
+ nfc->prog = PROG_GET_FEATURE;
+ dma_mode = 0;
+ addrcycles = 1;
+ write_size = 0;
+ }
+ if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, dma_mode, write_size,
+ addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_random_datain_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_PGRD, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_setfeature_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_SET_FEATURE;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_change_read_column_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, 2);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+static int anfc_zero_len_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ return 0;
+}
+
+static int anfc_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+static int anfc_page_write_nowait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(nfc, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out,
+ mtd->writesize, DIV_ROUND_UP(mtd->writesize,
+ achip->pktsize), achip->pktsize);
+
+ return 0;
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ /* Use a separate function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ anfc_random_datain_type_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_change_read_column_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_and_zero_len_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_setfeature_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_nowait_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_param_get_feature_sp_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_zero_len_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES)),
+ );
+
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &anfc_op_parser,
+ op, check_only);
+}
+
+static void anfc_select_chip(struct mtd_info *mtd, int num)
+{
+ u32 val;
+ int ret;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
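+	/*
+	 * A negative @num deselects the chip and drops the runtime PM
+	 * reference taken when it was selected.
+	 */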
+ if (num < 0) {
+ nfc->chip_active = false;
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return;
+ }
+
+ nfc->chip_active = true;
+ ret = pm_runtime_get_sync(nfc->dev);
+ if (ret < 0) {
+ dev_err(nfc->dev, "runtime_get_sync failed\n");
+ return;
+ }
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+	val &= ~(CS_MASK | BCH_MODE_MASK);
+ val |= (achip->csnum << CS_SHIFT) |
+ (achip->ecc_strength << BCH_MODE_SHIFT);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+ nfc->csnum = achip->csnum;
+ writel(achip->eccval, nfc->base + ECC_OFST);
+ writel(achip->inftimeval, nfc->base + DATA_INTERFACE_OFST);
+}
+
+static irqreturn_t anfc_irq_handler(int irq, void *ptr)
+{
+ struct anfc_nand_controller *nfc = ptr;
+ u32 status;
+
+ status = readl(nfc->base + INTR_STS_OFST);
+ if (status & EVENT_MASK) {
+ complete(&nfc->event);
+ writel(status & EVENT_MASK, nfc->base + INTR_STS_OFST);
+ writel(0, nfc->base + INTR_STS_EN_OFST);
+ writel(0, nfc->base + INTR_SIG_EN_OFST);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int anfc_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	int ret;
+
+ if (mtd->writesize <= SZ_512)
+ achip->caddr_cycles = 1;
+ else
+ achip->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ achip->raddr_cycles = 3;
+ else
+ achip->raddr_cycles = 2;
+
+ chip->ecc.calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ chip->ecc.code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!chip->ecc.calc_buf || !chip->ecc.code_buf)
+ return -ENOMEM;
+
+ ret = anfc_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct nand_controller_ops anfc_nand_controller_ops = {
+ .attach_chip = anfc_nand_attach_chip,
+};
+
+static int anfc_init_timing_mode(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip)
+{
+ struct nand_chip *chip = &achip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int mode, err;
+ unsigned int feature[2];
+ u32 inftimeval;
+ bool change_sdr_clk = false;
+
+ memset(feature, 0, sizeof(feature));
+ /* Get nvddr timing modes */
+ mode = onfi_get_sync_timing_mode(chip) & 0xff;
+ if (!mode) {
+ mode = fls(onfi_get_async_timing_mode(chip)) - 1;
+ inftimeval = mode;
+ if (mode >= 2 && mode <= 5)
+ change_sdr_clk = true;
+ } else {
+ mode = fls(mode) - 1;
+ inftimeval = NVDDR_MODE | (mode << NVDDR_TIMING_MODE_SHIFT);
+ mode |= ONFI_DATA_INTERFACE_NVDDR;
+ }
+
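+ /* Ask the device to switch to the selected timing mode */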
+ feature[0] = mode;
+ chip->select_chip(mtd, achip->csnum);
+ err = chip->set_features(mtd, chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ (uint8_t *)feature);
+ chip->select_chip(mtd, -1);
+ if (err)
+ return err;
+
+ /*
+ * SDR timing modes 2-5 do not work on the Arasan NAND controller
+ * when the clock frequency is above 90 MHz, so reduce the clock
+ * rate to below 90 MHz for those modes.
+ */
+ if (change_sdr_clk) {
+ clk_disable_unprepare(nfc->clk_sys);
+ err = clk_set_rate(nfc->clk_sys, SDR_MODE_DEFLT_FREQ);
+ if (err) {
+ dev_err(nfc->dev, "Can't set the clock rate\n");
+ return err;
+ }
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(nfc->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+ }
+ achip->inftimeval = inftimeval;
+
+ if (mode & ONFI_DATA_INTERFACE_NVDDR)
+ achip->spktsize = NVDDR_MODE_PACKET_SIZE;
+
+ return 0;
+}
+
+static int anfc_nand_chip_init(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *anand_chip,
+ struct device_node *np)
+{
+ struct nand_chip *chip = &anand_chip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &anand_chip->csnum);
+ if (ret) {
+ dev_err(nfc->dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, "arasan_nand.%d",
+ anand_chip->csnum);
+ mtd->dev.parent = nfc->dev;
+
+ chip->chip_delay = 30;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->select_chip = anfc_select_chip;
+ chip->exec_op = anfc_exec_op;
+ nand_set_flash_node(chip, np);
+
+ anand_chip->spktsize = SDR_MODE_PACKET_SIZE;
+
+ ret = nand_scan(mtd, 1);
+ if (ret) {
+ dev_err(nfc->dev, "nand_scan failed\n");
+ return ret;
+ }
+
+ ret = anfc_init_timing_mode(nfc, anand_chip);
+ if (ret) {
+ dev_err(nfc->dev, "timing mode init failed\n");
+ return ret;
+ }
+
+ return mtd_device_register(mtd, NULL, 0);
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc;
+ struct anfc_nand_chip *anand_chip;
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct resource *res;
+ int err;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->event);
+ nfc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, nfc);
+ nfc->csnum = -1;
+ nfc->controller.ops = &anfc_nand_controller_ops;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+ nfc->irq = platform_get_irq(pdev, 0);
+ if (nfc->irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ return nfc->irq;
+ }
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ return err;
+ err = devm_request_irq(&pdev->dev, nfc->irq, anfc_irq_handler,
+ 0, "arasannfc", nfc);
+ if (err)
+ return err;
+ nfc->clk_sys = devm_clk_get(&pdev->dev, "clk_sys");
+ if (IS_ERR(nfc->clk_sys)) {
+ dev_err(&pdev->dev, "sys clock not found.\n");
+ return PTR_ERR(nfc->clk_sys);
+ }
+
+ nfc->clk_flash = devm_clk_get(&pdev->dev, "clk_flash");
+ if (IS_ERR(nfc->clk_flash)) {
+ dev_err(&pdev->dev, "flash clock not found.\n");
+ return PTR_ERR(nfc->clk_flash);
+ }
+
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(nfc->clk_flash);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable flash clock.\n");
+ goto clk_dis_sys;
+ }
+
+ pm_runtime_set_autosuspend_delay(nfc->dev, ANFC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(nfc->dev);
+ pm_runtime_set_active(nfc->dev);
+ pm_runtime_get_noresume(nfc->dev);
+ pm_runtime_enable(nfc->dev);
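+ /*
+ * Instantiate one NAND chip per child node; a chip that fails to
+ * initialize is skipped rather than failing the whole probe.
+ */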
+ for_each_available_child_of_node(np, child) {
+ anand_chip = devm_kzalloc(&pdev->dev, sizeof(*anand_chip),
+ GFP_KERNEL);
+ if (!anand_chip) {
+ of_node_put(child);
+ err = -ENOMEM;
+ goto nandchip_clean_up;
+ }
+ err = anfc_nand_chip_init(nfc, anand_chip, child);
+ if (err) {
+ devm_kfree(&pdev->dev, anand_chip);
+ continue;
+ }
+
+ list_add_tail(&anand_chip->node, &nfc->chips);
+ }
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return 0;
+
+nandchip_clean_up:
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(nand_to_mtd(&anand_chip->chip));
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(nfc->clk_flash);
+clk_dis_sys:
+ clk_disable_unprepare(nfc->clk_sys);
+
+ return err;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc = platform_get_drvdata(pdev);
+ struct anfc_nand_chip *anand_chip;
+
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(nand_to_mtd(&anand_chip->chip));
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ clk_disable_unprepare(nfc->clk_sys);
+ clk_disable_unprepare(nfc->clk_flash);
+
+ return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+ { .compatible = "arasan,nfc-v3p10" },
+ { .compatible = "xlnx,zynqmp-nand" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static int anfc_suspend(struct device *dev)
+{
+ return pm_runtime_put_sync(dev);
+}
+
+static int anfc_resume(struct device *dev)
+{
+ return pm_runtime_get_sync(dev);
+}
+
+static int __maybe_unused anfc_runtime_suspend(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ clk_disable(nfc->clk_sys);
+ clk_disable(nfc->clk_flash);
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_idle(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ if (nfc->chip_active)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int __maybe_unused anfc_runtime_resume(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(nfc->clk_sys);
+ if (ret) {
+ dev_err(dev, "Cannot enable sys clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(nfc->clk_flash);
+ if (ret) {
+ dev_err(dev, "Cannot enable flash clock.\n");
+ clk_disable(nfc->clk_sys);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops anfc_pm_ops = {
+ .resume = anfc_resume,
+ .suspend = anfc_suspend,
+ .runtime_resume = anfc_runtime_resume,
+ .runtime_suspend = anfc_runtime_suspend,
+ .runtime_idle = anfc_runtime_idle,
+};
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ .pm = &anfc_pm_ops,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 360b61411f07..0354ffe3bd83 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -467,9 +467,18 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- ret = nand_reset(chip, chipnr);
- if (ret)
- return ret;
+ /*
+ * ONFI-compliant devices may support several data interface modes
+ * (SDR, NVDDR, NVDDR2). A reset returns the device to its power-up
+ * state and places the target in the SDR data interface mode, which
+ * breaks devices configured for an NVDDR mode. Therefore, limit the
+ * reset operation to Toshiba devices.
+ */
+ if (chip->parameters.onfi->jedec_id == NAND_MFR_TOSHIBA) {
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
+ }
nand_select_target(chip, chipnr);
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 8fe8d7bdd203..6b1f727a2de6 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -149,6 +149,12 @@ int nand_onfi_detect(struct nand_chip *chip)
memorg = nanddev_get_memorg(&chip->base);
+ /*
+ * ONFI must be probed in 8-bit mode; 16-bit mode should be selected
+ * with NAND_BUSWIDTH_AUTO.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ pr_err("Trying ONFI probe in 16-bit mode, aborting!\n");
+ return 0;
+ }
+
/* Try ONFI for unknown chip or LP */
ret = nand_readid_op(chip, 0x20, id, sizeof(id));
if (ret || strncmp(id, "ONFI", 4))
@@ -294,6 +300,8 @@ int nand_onfi_detect(struct nand_chip *chip)
onfi->tR = le16_to_cpu(p->t_r);
onfi->tCCS = le16_to_cpu(p->t_ccs);
onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->src_sync_timing_mode = le16_to_cpu(p->src_sync_timing_mode);
+ onfi->jedec_id = le16_to_cpu(p->jedec_id);
onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
chip->parameters.onfi = onfi;
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index f12b7a7844c9..e4c161afd3e6 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -53,6 +53,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 120000,
.tWP_min = 50000,
.tWW_min = 100000,
+ .mode = 0,
},
},
/* Mode 1 */
@@ -95,6 +96,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 25000,
.tWW_min = 100000,
+ .mode = 1,
},
},
/* Mode 2 */
@@ -137,6 +139,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 17000,
.tWW_min = 100000,
+ .mode = 2,
},
},
/* Mode 3 */
@@ -179,6 +182,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 15000,
.tWW_min = 100000,
+ .mode = 3,
},
},
/* Mode 4 */
@@ -221,6 +225,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 12000,
.tWW_min = 100000,
+ .mode = 4,
},
},
/* Mode 5 */
@@ -263,6 +268,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
.tWHR_min = 80000,
.tWP_min = 10000,
.tWW_min = 100000,
+ .mode = 5,
},
},
};
diff --git a/drivers/mtd/nand/raw/pl353_nand.c b/drivers/mtd/nand/raw/pl353_nand.c
new file mode 100644
index 000000000000..c004dfa505ac
--- /dev/null
+++ b/drivers/mtd/nand/raw/pl353_nand.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL353 NAND flash controller driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc
+ * Author: Punnaiah chowdary kalluri <punnaiah@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pl353-smc.h>
+#include <linux/clk.h>
+
+#define PL353_NAND_DRIVER_NAME "pl353-nand"
+
+/* NAND flash driver defines */
+#define PL353_NAND_CMD_PHASE 1 /* End command valid in command phase */
+#define PL353_NAND_DATA_PHASE 2 /* End command valid in data phase */
+#define PL353_NAND_ECC_SIZE 512 /* Size of data for ECC operation */
+
+/* Flash memory controller operating parameters */
+
+#define PL353_NAND_ECC_CONFIG (BIT(4) | /* ECC read at end of page */ \
+ (0 << 5)) /* No Jumping */
+
+/* AXI Address definitions */
+#define START_CMD_SHIFT 3
+#define END_CMD_SHIFT 11
+#define END_CMD_VALID_SHIFT 20
+#define ADDR_CYCLES_SHIFT 21
+#define CLEAR_CS_SHIFT 21
+#define ECC_LAST_SHIFT 10
+#define COMMAND_PHASE (0 << 19)
+#define DATA_PHASE BIT(19)
+
+#define PL353_NAND_ECC_LAST BIT(ECC_LAST_SHIFT) /* Set ECC_Last */
+#define PL353_NAND_CLEAR_CS BIT(CLEAR_CS_SHIFT) /* Clear chip select */
+
+#define ONDIE_ECC_FEATURE_ADDR 0x90
+#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_DEV_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_LAST_TRANSFER_LENGTH 4
+#define PL353_NAND_ECC_VALID_SHIFT 24
+#define PL353_NAND_ECC_VALID_MASK 0x40
+#define PL353_ECC_BITS_BYTEOFF_MASK 0x1FF
+#define PL353_ECC_BITS_BITOFF_MASK 0x7
+#define PL353_ECC_BIT_MASK 0xFFF
+#define PL353_TREA_MAX_VALUE 1
+#define PL353_MAX_ECC_CHUNKS 4
+#define PL353_MAX_ECC_BYTES 3
+
+struct pl353_nfc_op {
+ u32 cmnds[4];
+ u32 end_cmd;
+ u32 addrs;
+ u32 len;
+ u32 naddrs;
+ u32 addr5;
+ u32 addr6;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int cle_ale_delay_ns;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct pl353_nand_controller - Defines the NAND flash controller driver
+ * instance
+ * @chip: NAND chip information structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: Virtual address of the NAND flash device
+ * @buf_addr: Virtual address of the NAND flash device for
+ * data read/writes
+ * @addr_cycles: Address cycles
+ * @mclk: Memory controller clock
+ * @buswidth: Bus width 8 or 16
+ */
+struct pl353_nand_controller {
+ struct nand_chip chip;
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *buf_addr;
+ u8 addr_cycles;
+ struct clk *mclk;
+ u32 buswidth;
+};
+
+static int pl353_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes);
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl353_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout16_ops = {
+ .ecc = pl353_ecc_ooblayout16_ecc,
+ .free = pl353_ecc_ooblayout16_free,
+};
+
+static int pl353_ecc_ooblayout64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 52;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int pl353_ecc_ooblayout64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = (section * chip->ecc.bytes) + 2;
+ oobregion->length = 50;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout64_ops = {
+ .ecc = pl353_ecc_ooblayout64_ecc,
+ .free = pl353_ecc_ooblayout64_free,
+};
+
+/* Generic flash BBT descriptors */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static void pl353_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (xnfc->buswidth == 8)
+ return;
+
+ if (force_8bit)
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8);
+ else
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+}
+
+/**
+ * pl353_nand_read_data_op - read chip data into buffer
+ * @chip: Pointer to the NAND chip info structure
+ * @in: Pointer to the buffer to store read data
+ * @len: Number of bytes to read
+ * @force_8bit: Force 8-bit bus access
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_data_op(struct nand_chip *chip,
+ u8 *in,
+ unsigned int len, bool force_8bit)
+{
+ int i;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, true);
+
+ if ((IS_ALIGNED((uint32_t)in, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) || (!force_8bit)) {
+ u32 *ptr = (u32 *)in;
+
+ len /= 4;
+ for (i = 0; i < len; i++)
+ ptr[i] = readl(xnfc->buf_addr);
+ } else {
+ for (i = 0; i < len; i++)
+ in[i] = readb(xnfc->buf_addr);
+ }
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_data_op - write buffer to chip
+ * @mtd: Pointer to the mtd_info structure
+ * @buf: Pointer to the buffer holding the write data
+ * @len: Number of bytes to write
+ * @force_8bit: Force 8-bit bus access
+ */
+static void pl353_nand_write_data_op(struct mtd_info *mtd, const u8 *buf,
+ int len, bool force_8bit)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, true);
+
+ if ((IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+ IS_ALIGNED(len, sizeof(uint32_t))) || (!force_8bit)) {
+ u32 *ptr = (u32 *)buf;
+
+ len /= 4;
+ for (i = 0; i < len; i++)
+ writel(ptr[i], xnfc->buf_addr);
+ } else {
+ for (i = 0; i < len; i++)
+ writeb(buf[i], xnfc->buf_addr);
+ }
+ if (force_8bit)
+ pl353_nfc_force_byte_access(chip, false);
+}
+
+static int pl353_wait_for_ecc_done(void)
+{
+ unsigned long timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT;
+
+ do {
+ if (pl353_smc_ecc_is_busy())
+ cpu_relax();
+ else
+ break;
+ } while (!time_after_eq(jiffies, timeout));
+
+ if (time_after_eq(jiffies, timeout)) {
+ pr_err("%s timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_calculate_hwecc - Calculate Hardware ECC
+ * @mtd: Pointer to the mtd_info structure
+ * @data: Pointer to the page data
+ * @ecc: Pointer to the ECC buffer where ECC data needs to be stored
+ *
+ * This function retrieves the hardware ECC data from the controller and
+ * returns it to the MTD subsystem. The controller operates on 512-byte
+ * blocks of NAND memory and can be programmed to store the ECC codes after
+ * the data in memory. For writes, the ECC is written to the spare area of
+ * the page. For reads, the result of a block ECC check is made available
+ * to the device driver.
+ *
+ * ------------------------------------------------------------------------
+ * | n * 512 blocks | extra | ecc | |
+ * | | block | codes | |
+ * ------------------------------------------------------------------------
+ *
+ * The ECC calculation uses a simple Hamming code with 1-bit correction and
+ * 2-bit detection. It starts when a valid read or write command with a
+ * 512-byte aligned address is detected on the memory interface.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_calculate_hwecc(struct mtd_info *mtd,
+ const u8 *data, u8 *ecc)
+{
+ u32 ecc_value;
+ u8 chunk, ecc_byte, ecc_status;
+
+ for (chunk = 0; chunk < PL353_MAX_ECC_CHUNKS; chunk++) {
+ /* Read ECC value for each block */
+ ecc_value = pl353_smc_get_ecc_val(chunk);
+ ecc_status = (ecc_value >> PL353_NAND_ECC_VALID_SHIFT);
+
+ /* ECC value valid */
+ if (ecc_status & PL353_NAND_ECC_VALID_MASK) {
+ for (ecc_byte = 0; ecc_byte < PL353_MAX_ECC_BYTES;
+ ecc_byte++) {
+ /* Copy ECC bytes to MTD buffer */
+ *ecc = ~ecc_value & 0xFF;
+ ecc_value = ecc_value >> 8;
+ ecc++;
+ }
+ } else {
+ pr_warn("%s status failed\n", __func__);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_correct_data - ECC correction function
+ * @mtd: Pointer to the mtd_info structure
+ * @buf: Pointer to the page data
+ * @read_ecc: Pointer to the ECC value read from spare data area
+ * @calc_ecc: Pointer to the calculated ECC value
+ *
+ * This function corrects single-bit ECC errors and detects 2-bit errors.
+ *
+ * Return: 0 if no ECC errors found
+ * 1 if single bit error found and corrected.
+ * -1 if multiple uncorrectable ECC errors found.
+ */
+static int pl353_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ unsigned char bit_addr;
+ unsigned int byte_addr;
+ unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
+ unsigned short calc_ecc_lower, calc_ecc_upper;
+
+ read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
+ PL353_ECC_BIT_MASK;
+ read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
+ PL353_ECC_BIT_MASK;
+
+ calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
+ PL353_ECC_BIT_MASK;
+ calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
+ PL353_ECC_BIT_MASK;
+
+ ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+ ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+ /* no error */
+ if (!ecc_odd && !ecc_even)
+ return 0;
+
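+ /*
+ * In this Hamming scheme a single-bit data error makes the odd
+ * parity syndrome the exact complement of the even one; the
+ * syndrome value then encodes the failing bit position.
+ */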
+ if (ecc_odd == (~ecc_even & PL353_ECC_BIT_MASK)) {
+ /* bits [11:3] of error code is byte offset */
+ byte_addr = (ecc_odd >> 3) & PL353_ECC_BITS_BYTEOFF_MASK;
+ /* bits [2:0] of error code is bit offset */
+ bit_addr = ecc_odd & PL353_ECC_BITS_BITOFF_MASK;
+ /* Toggling error bit */
+ buf[byte_addr] ^= (BIT(bit_addr));
+ return 1;
+ }
+
+ /* one error in parity */
+ if (hweight32(ecc_odd | ecc_even) == 1)
+ return 1;
+
+ /* Uncorrectable error */
+ return -1;
+}
+
+static void pl353_prepare_cmd(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int column, int start_cmd, int end_cmd,
+ bool read)
+{
+ unsigned long data_phase_addr;
+ u32 end_cmd_valid = 0;
+ unsigned long cmd_phase_addr = 0, cmd_phase_data = 0;
+
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+
+ end_cmd_valid = read ? 1 : 0;
+
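+ /*
+ * The SMC encodes the command opcodes, address-cycle count and
+ * phase type in the AXI address itself; the value written to that
+ * address carries the column/page address bytes.
+ */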
+ cmd_phase_addr = (unsigned long __force)xnfc->regs +
+ ((xnfc->addr_cycles
+ << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (start_cmd << START_CMD_SHIFT));
+
+ /* Get the data phase address */
+ data_phase_addr = (unsigned long __force)xnfc->regs +
+ ((0x0 << CLEAR_CS_SHIFT) |
+ (0 << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT));
+
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column /= 2;
+ cmd_phase_data = column;
+ if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+ cmd_phase_data |= page << 16;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(cmd_phase_data,
+ (void __iomem * __force)cmd_phase_addr);
+ cmd_phase_data = (page >> 16);
+ }
+ } else {
+ cmd_phase_data |= page << 8;
+ }
+
+ writel_relaxed(cmd_phase_data, (void __iomem * __force)cmd_phase_addr);
+}
+
+/**
+ * pl353_nand_read_oob - [REPLACEABLE] the most common OOB data read function
+ * @mtd: Pointer to the mtd_info structure
+ * @chip: Pointer to the nand_chip structure
+ * @page: Page number to read
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ chip->pagebuf = -1;
+ if (mtd->writesize < PL353_NAND_ECC_SIZE)
+ return 0;
+
+ pl353_prepare_cmd(mtd, chip, page, mtd->writesize, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+
+ nand_wait_ready(mtd);
+
+ p = chip->oob_poi;
+ pl353_nand_read_data_op(chip, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
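+ /*
+ * Re-encode the data phase address with CLEAR_CS set so that the
+ * final transfer de-asserts the chip select.
+ */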
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_oob - [REPLACEABLE] the most common OOB data write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @page: Page number to write
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ const u8 *buf = chip->oob_poi;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ chip->pagebuf = -1;
+ pl353_prepare_cmd(mtd, chip, page, mtd->writesize, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+
+ pl353_nand_write_data_op(mtd, buf,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ buf += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, buf, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+ nand_wait_ready(mtd);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_read_page_raw - [Intern] read raw page data without ECC
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+ nand_wait_ready(mtd);
+ pl353_nand_read_data_op(chip, buf, mtd->writesize, false);
+ p = chip->oob_poi;
+ pl353_nand_read_data_op(chip, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_page_raw - [Intern] raw page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data to be written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ unsigned long data_phase_addr;
+ u8 *p;
+
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+ pl353_nand_write_data_op(mtd, buf, mtd->writesize, false);
+ p = chip->oob_poi;
+ pl353_nand_write_data_op(mtd, p,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+ p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_write_page_hwecc - Hardware ECC based page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data to be written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * This function writes the data and the hardware-generated ECC values to
+ * the page.
+ *
+ * Return: Always returns zero
+ */
+static int pl353_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *oob_ptr;
+ const u8 *p = buf;
+ int ret;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_SEQIN,
+ NAND_CMD_PAGEPROG, 0);
+
+ for ( ; eccsteps > 1; eccsteps--) {
+ pl353_nand_write_data_op(mtd, p, eccsize, false);
+ p += eccsize;
+ }
+ pl353_nand_write_data_op(mtd, p,
+ (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+ false);
+ p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_write_data_op(mtd, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ /* Wait till the ECC operation is complete or timeout */
+ ret = pl353_wait_for_ecc_done();
+ if (ret)
+ dev_err(xnfc->dev, "ECC Timeout\n");
+ p = buf;
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+ if (ret)
+ return ret;
+
+ /* Copy the calculated ECC bytes into the OOB buffer */
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
+ 0, chip->ecc.total);
+ if (ret)
+ return ret;
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr &= ~PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Write the spare area with ECC bytes */
+ oob_ptr = chip->oob_poi;
+ pl353_nand_write_data_op(mtd, oob_ptr,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+ pl353_nand_write_data_op(mtd, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+ nand_wait_ready(mtd);
+
+ return 0;
+}
+
+/**
+ * pl353_nand_read_page_hwecc - Hardware ECC based page read function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the buffer to store read data
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * This function reads the data and checks its integrity by comparing the
+ * hardware-generated ECC values with the ECC values read from the spare
+ * area. The SMC controller has a limitation: ECC_LAST must be set on the
+ * last data phase access to tell the ECC block not to expect any further
+ * data. For example, with 4 ECC steps, the first 3 are read with HW ECC
+ * enabled as usual; for the last step, all but the final 4 bytes are read,
+ * and the remaining 4 bytes are read in one more transfer with ECC_LAST
+ * set in the NAND data phase register. The last transfer must end on a
+ * 512-byte block boundary. Because of this limitation, the core page-read
+ * routines are not used.
+ *
+ * Return: Maximum number of bitflips detected in any ECC step, or a
+ * negative errno on failure
+ */
+static int pl353_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ int i, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
+ u8 *oob_ptr;
+ int ret;
+ unsigned long data_phase_addr;
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long nand_offset = (unsigned long __force)xnfc->regs;
+
+ pl353_prepare_cmd(mtd, chip, page, 0, NAND_CMD_READ0,
+ NAND_CMD_READSTART, 1);
+ nand_wait_ready(mtd);
+
+ for ( ; eccsteps > 1; eccsteps--) {
+ pl353_nand_read_data_op(chip, p, eccsize, false);
+ p += eccsize;
+ }
+ pl353_nand_read_data_op(chip, p,
+ (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+ false);
+ p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+ pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ /* Wait till the ECC operation is complete or timeout */
+ ret = pl353_wait_for_ecc_done();
+ if (ret)
+ dev_err(xnfc->dev, "ECC Timeout\n");
+
+ /* Read the calculated ECC value */
+ p = buf;
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+ if (ret)
+ return ret;
+
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr &= ~PL353_NAND_ECC_LAST;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Read the stored ECC value */
+ oob_ptr = chip->oob_poi;
+ pl353_nand_read_data_op(chip, oob_ptr,
+ (mtd->oobsize -
+ PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+ /* de-assert chip select */
+ data_phase_addr = (unsigned long __force)xnfc->buf_addr;
+ data_phase_addr -= nand_offset;
+ data_phase_addr |= PL353_NAND_CLEAR_CS;
+ data_phase_addr += nand_offset;
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+ pl353_nand_read_data_op(chip, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+ false);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ /* Check ECC error for all blocks and correct if it is correctable */
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ stat = chip->ecc.correct(mtd, p, &ecc[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * pl353_nand_select_chip - Select the flash device
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ *
+ * This function is empty because the NAND controller handles the chip
+ * select line internally, based on the chip address passed in the command
+ * and data phases.
+ */
+static void pl353_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void pl353_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct pl353_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id, offset, naddrs;
+ int i;
+ const u8 *addrs;
+
+ memset(nfc_op, 0, sizeof(struct pl353_nfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (op_id)
+ nfc_op->cmnds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmnds[0] = instr->ctx.cmd.opcode;
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ nfc_op->addrs = instr->ctx.addr.addrs[offset];
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++) {
+ nfc_op->addrs |= instr->ctx.addr.addrs[i] <<
+ (8 * i);
+ }
+
+ if (naddrs >= 5)
+ nfc_op->addr5 = addrs[4];
+ if (naddrs >= 6)
+ nfc_op->addr6 = addrs[5];
+ nfc_op->naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->len = nand_subop_get_data_len(subop, op_id);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/**
+ * pl353_nand_exec_op_cmd - Send command to NAND device
+ * @chip: Pointer to the NAND chip info structure
+ * @subop: Pointer to array of instructions
+ * Return: Always returns zero
+ */
+static int pl353_nand_exec_op_cmd(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_op_instr *instr;
+ struct pl353_nfc_op nfc_op = {};
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ unsigned long cmd_phase_data = 0, end_cmd_valid = 0;
+ unsigned long cmd_phase_addr, data_phase_addr, end_cmd;
+ unsigned int op_id, len = 0;
+ bool reading;
+
+ pl353_nfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ if (instr)
+ len = nand_subop_get_data_len(subop, op_id);
+
+ pl353_smc_clr_nand_int();
+ /* Get the command phase address */
+ if (nfc_op.cmnds[1] != 0) {
+ if (nfc_op.cmnds[0] == NAND_CMD_SEQIN)
+ end_cmd_valid = 0;
+ else
+ end_cmd_valid = 1;
+ end_cmd = nfc_op.cmnds[1];
+ } else {
+ end_cmd = 0x0;
+ }
+
+ /*
+ * The SMC defines two phases of commands when transferring data to or
+ * from NAND flash.
+ * Command phase: Commands and optional address information are written
+ * to the NAND flash. The command and address can be associated with
+ * either a data phase operation to write to or read from the array,
+ * or a status/ID register transfer.
+ * Data phase: Data is either written to or read from the NAND flash.
+ * This data can be either data transferred to or from the array,
+ * or status/ID register information.
+ */
+ cmd_phase_addr = (unsigned long __force)xnfc->regs +
+ ((nfc_op.naddrs << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (nfc_op.cmnds[0] << START_CMD_SHIFT));
+
+ /* Get the data phase address */
+ end_cmd_valid = 0;
+
+ data_phase_addr = (unsigned long __force)xnfc->regs +
+ ((0x0 << CLEAR_CS_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT));
+ xnfc->buf_addr = (void __iomem * __force)data_phase_addr;
+
+ /* Command phase AXI Read & Write */
+ if (nfc_op.naddrs >= 5) {
+ if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+ cmd_phase_data = nfc_op.addrs;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->options & NAND_ROW_ADDR_3) {
+ writel_relaxed(cmd_phase_data,
+ (void __iomem * __force)
+ cmd_phase_addr);
+ cmd_phase_data = nfc_op.addr5;
+ if (nfc_op.naddrs >= 6)
+ cmd_phase_data |= (nfc_op.addr6 << 8);
+ }
+ }
+ } else {
+ if (nfc_op.addrs != -1) {
+ int column = nfc_op.addrs;
+ /*
+ * Change read/write column, read id etc
+ * Adjust columns for 16 bit bus width
+ */
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ (nfc_op.cmnds[0] == NAND_CMD_READ0 ||
+ nfc_op.cmnds[0] == NAND_CMD_SEQIN ||
+ nfc_op.cmnds[0] == NAND_CMD_RNDOUT ||
+ nfc_op.cmnds[0] == NAND_CMD_RNDIN)) {
+ column >>= 1;
+ }
+ cmd_phase_data = column;
+ }
+ }
+ writel_relaxed(cmd_phase_data, (void __iomem * __force)cmd_phase_addr);
+
+ if (!nfc_op.data_instr) {
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ return 0;
+ }
+
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+ if (!reading) {
+ pl353_nand_write_data_op(mtd, instr->ctx.data.buf.out,
+ len, instr->ctx.data.force_8bit);
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ cond_delay(nfc_op.rdy_delay_ns);
+ } else {
+ cond_delay(nfc_op.rdy_delay_ns);
+ if (nfc_op.rdy_timeout_ms)
+ nand_wait_ready(mtd);
+ pl353_nand_read_data_op(chip, instr->ctx.data.buf.in, len,
+ instr->ctx.data.force_8bit);
+ }
+
+ return 0;
+}
+
+static const struct nand_op_parser pl353_nfc_op_parser = NAND_OP_PARSER
+ (NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 2048)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, 8),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2048),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN
+ (pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ );
+
+static int pl353_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ return nand_op_parser_exec_op(chip, &pl353_nfc_op_parser,
+ op, check_only);
+}
+
+/**
+ * pl353_nand_device_ready - Check device ready/busy line
+ * @mtd: Pointer to the mtd_info structure
+ *
+ * Return: 0 on busy or 1 on ready state
+ */
+static int pl353_nand_device_ready(struct mtd_info *mtd)
+{
+ if (pl353_smc_get_nand_int_status_raw()) {
+ pl353_smc_clr_nand_int();
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_ecc_init - Initialize the ecc information as per the ecc mode
+ * @mtd: Pointer to the mtd_info structure
+ * @ecc: Pointer to ECC control structure
+ * @ecc_mode: ECC mode (on-die or hardware)
+ *
+ * This function initializes the ECC block and function pointers according
+ * to the ECC mode.
+ *
+ * Return: 0 on success or negative errno.
+ */
+static int pl353_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+ int ecc_mode)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ int err = 0;
+
+ ecc->read_oob = pl353_nand_read_oob;
+ ecc->write_oob = pl353_nand_write_oob;
+
+ if (ecc_mode == NAND_ECC_ON_DIE) {
+ ecc->write_page_raw = pl353_nand_write_page_raw;
+ ecc->read_page_raw = pl353_nand_read_page_raw;
+ pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_BYPASS);
+ /*
+ * With on-die ECC, spare byte offset 8 is used for the ECC codes,
+ * so use the flash-based BBT pattern descriptors.
+ */
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ } else {
+ ecc->mode = NAND_ECC_HW;
+ /* Hardware ECC generates 3 ECC bytes for each 512 data bytes */
+ ecc->bytes = 3;
+ ecc->strength = 1;
+ ecc->calculate = pl353_nand_calculate_hwecc;
+ ecc->correct = pl353_nand_correct_data;
+ ecc->read_page = pl353_nand_read_page_hwecc;
+ ecc->write_page = pl353_nand_write_page_hwecc;
+ ecc->size = PL353_NAND_ECC_SIZE;
+ pl353_smc_set_ecc_pg_size(mtd->writesize);
+ switch (mtd->writesize) {
+ case SZ_512:
+ case SZ_1K:
+ case SZ_2K:
+ pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
+ break;
+ default:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->size = 256;
+ break;
+ }
+
+ if (mtd->oobsize == 16) {
+ mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout16_ops);
+ } else if (mtd->oobsize == 64) {
+ mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout64_ops);
+ } else {
+ err = -ENXIO;
+ dev_err(xnfc->dev, "Unsupported OOB layout\n");
+ }
+ }
+
+ return err;
+}
+
+static int pl353_setup_data_interface(struct mtd_info *mtd, int csline,
+ const struct nand_data_interface *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ const struct nand_sdr_timings *sdr;
+ u32 timings[7], mckperiodps;
+
+ if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles.
+ */
+ mckperiodps = NSEC_PER_SEC / clk_get_rate(xnfc->mclk);
+ mckperiodps *= 1000;
+ if (sdr->tRC_min <= 20000)
+ /*
+ * The PL353 SMC needs one extra read cycle in SDR Mode 5.
+ * This is not documented in the datasheet but was observed
+ * during testing.
+ */
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps) + 1;
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps) + 1;
+ else
+ timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps);
+
+ timings[1] = DIV_ROUND_UP(sdr->tWC_min, mckperiodps);
+ /*
+ * For all SDR modes, the PL353 SMC needs a tREA max value of 1,
+ * as observed during testing.
+ */
+ timings[2] = PL353_TREA_MAX_VALUE;
+ timings[3] = DIV_ROUND_UP(sdr->tWP_min, mckperiodps);
+ timings[4] = DIV_ROUND_UP(sdr->tCLR_min, mckperiodps);
+ timings[5] = DIV_ROUND_UP(sdr->tAR_min, mckperiodps);
+ timings[6] = DIV_ROUND_UP(sdr->tRR_min, mckperiodps);
+ pl353_smc_set_cycles(timings);
+
+ return 0;
+}
+
+static int pl353_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct pl353_nand_controller *xnfc =
+ container_of(chip, struct pl353_nand_controller, chip);
+ int ret;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+
+ if (mtd->writesize <= SZ_512)
+ xnfc->addr_cycles = 1;
+ else
+ xnfc->addr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ xnfc->addr_cycles += 3;
+ else
+ xnfc->addr_cycles += 2;
+
+ ret = pl353_nand_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+ if (ret) {
+ dev_err(xnfc->dev, "ECC init failed\n");
+ return ret;
+ }
+
+ if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, ie:
+ *
+ * label = "pl353-nand";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(xnfc->dev, GFP_KERNEL,
+ "%s", PL353_NAND_DRIVER_NAME);
+ if (!mtd->name) {
+ dev_err(xnfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static const struct nand_controller_ops pl353_nand_controller_ops = {
+ .attach_chip = pl353_nand_attach_chip,
+};
+
+/**
+ * pl353_nand_probe - Probe method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ * The NAND driver has dependency with the pl353_smc memory controller
+ * driver for initializing the NAND timing parameters, bus width, ECC modes,
+ * control and status information.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_probe(struct platform_device *pdev)
+{
+ struct pl353_nand_controller *xnfc;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ struct resource *res;
+ struct device_node *np, *dn;
+ int ret;
+ u32 val;
+
+ xnfc = devm_kzalloc(&pdev->dev, sizeof(*xnfc), GFP_KERNEL);
+ if (!xnfc)
+ return -ENOMEM;
+ xnfc->dev = &pdev->dev;
+
+ /* Map physical address of NAND flash */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xnfc->regs = devm_ioremap_resource(xnfc->dev, res);
+ if (IS_ERR(xnfc->regs))
+ return PTR_ERR(xnfc->regs);
+
+ chip = &xnfc->chip;
+ mtd = nand_to_mtd(chip);
+ chip->exec_op = pl353_nfc_exec_op;
+ nand_set_controller_data(chip, xnfc);
+ mtd->priv = chip;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_flash_node(chip, xnfc->dev->of_node);
+
+ /* Set the driver entry points for MTD */
+ chip->dev_ready = pl353_nand_device_ready;
+ chip->select_chip = pl353_nand_select_chip;
+ np = of_get_next_parent(xnfc->dev->of_node);
+ xnfc->mclk = of_clk_get(np, 0);
+ if (IS_ERR(xnfc->mclk)) {
+ dev_err(xnfc->dev, "Failed to retrieve MCK clk\n");
+ return PTR_ERR(xnfc->mclk);
+ }
+
+ dn = nand_get_flash_node(chip);
+ ret = of_property_read_u32(dn, "nand-bus-width", &val);
+ if (ret)
+ val = 8;
+
+ xnfc->buswidth = val;
+ /* If we do not set this delay, the framework defaults to 20us */
+ chip->chip_delay = 30;
+ /* Set the device option and flash width */
+ chip->options = NAND_BUSWIDTH_AUTO;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ platform_set_drvdata(pdev, xnfc);
+ chip->setup_data_interface = pl353_setup_data_interface;
+ chip->dummy_controller.ops = &pl353_nand_controller_ops;
+ ret = nand_scan(mtd, 1);
+ if (ret) {
+ dev_err(xnfc->dev, "could not scan the nand chip\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(xnfc->dev, "Failed to register mtd device: %d\n", ret);
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pl353_nand_remove - Remove method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if the driver module is being unloaded. It frees all
+ * resources allocated to the device.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_remove(struct platform_device *pdev)
+{
+ struct pl353_nand_controller *xnfc = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = nand_to_mtd(&xnfc->chip);
+
+ /* Release resources, unregister device */
+ nand_release(mtd);
+
+ return 0;
+}
+
+/* Match table for device tree binding */
+static const struct of_device_id pl353_nand_of_match[] = {
+ { .compatible = "arm,pl353-nand-r2p1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pl353_nand_of_match);
+
+/*
+ * pl353_nand_driver - This structure defines the NAND subsystem platform driver
+ */
+static struct platform_driver pl353_nand_driver = {
+ .probe = pl353_nand_probe,
+ .remove = pl353_nand_remove,
+ .driver = {
+ .name = PL353_NAND_DRIVER_NAME,
+ .of_match_table = pl353_nand_of_match,
+ },
+};
+
+module_platform_driver(pl353_nand_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_ALIAS("platform:" PL353_NAND_DRIVER_NAME);
+MODULE_DESCRIPTION("ARM PL353 NAND Flash Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index e93584650dfc..1cf29ca05f4b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
+#include <linux/spi/spi.h>
/* Define max times to check status register before we give up. */
@@ -142,6 +143,23 @@ struct sfdp_header {
/* Basic Flash Parameter Table */
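+/*
+ * update_stripe - decide whether an opcode's payload is striped across
+ * both devices of a parallel (dual) flash configuration. Erase and
+ * register-write opcodes act on a whole device and must not be split,
+ * so they return false.
+ */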
+bool update_stripe(const u8 opcode)
+{
+ if (opcode == SPINOR_OP_BE_4K ||
+ opcode == SPINOR_OP_BE_32K ||
+ opcode == SPINOR_OP_CHIP_ERASE ||
+ opcode == SPINOR_OP_SE ||
+ opcode == SPINOR_OP_BE_32K_4B ||
+ opcode == SPINOR_OP_SE_4B ||
+ opcode == SPINOR_OP_BE_4K_4B ||
+ opcode == SPINOR_OP_WRSR ||
+ opcode == SPINOR_OP_WREAR ||
+ opcode == SPINOR_OP_BRWR ||
+ opcode == SPINOR_OP_WRSR2)
+ return false;
+
+ return true;
+}
/*
* JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
* They are indexed from 1 but C arrays are indexed from 0.
@@ -250,7 +268,7 @@ struct flash_info {
u16 page_size;
u16 addr_width;
- u16 flags;
+ u32 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
@@ -279,6 +297,9 @@ struct flash_info {
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */
+#define SST_GLOBAL_PROT_UNLK BIT(16) /* Unlock the Global protection for
+ * sst flashes
+ */
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
@@ -288,6 +309,8 @@ struct flash_info {
#define JEDEC_MFR(info) ((info)->id[0])
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr);
+
/*
* Read the status register, returning its value in the location
* Return the status register value.
@@ -296,15 +319,24 @@ struct flash_info {
static int read_sr(struct spi_nor *nor)
{
int ret;
- u8 val;
+ u8 val[2];
- ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
- if (ret < 0) {
- pr_err("error %d reading SR\n", (int) ret);
- return ret;
+ if (nor->isparallel) {
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 2);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
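+ /*
+ * In parallel mode each device returns its own status byte;
+ * OR them so that a busy or error bit set on either device
+ * is reported.
+ */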
+ val[0] |= val[1];
+ } else {
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val[0], 1);
+ if (ret < 0) {
+ pr_err("error %d reading SR\n", (int) ret);
+ return ret;
+ }
}
- return val;
+ return val[0];
}
/*
@@ -315,15 +347,24 @@ static int read_sr(struct spi_nor *nor)
static int read_fsr(struct spi_nor *nor)
{
int ret;
- u8 val;
+ u8 val[2];
- ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
- if (ret < 0) {
- pr_err("error %d reading FSR\n", ret);
- return ret;
+ if (nor->isparallel) {
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 2);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
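+ /*
+ * The FSR ready bit is set when a device is ready, so AND
+ * the two values: both devices must be ready.
+ */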
+ val[0] &= val[1];
+ } else {
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val[0], 1);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
}
- return val;
+ return val[0];
}
/*
@@ -513,6 +554,38 @@ static int set_4byte(struct spi_nor *nor, bool enable)
}
}
+/**
+ * read_ear - Get the extended/bank address register value
+ * @nor: Pointer to the flash control structure
+ * @info: Pointer to the flash_info structure
+ *
+ * This routine reads the extended/bank address register value.
+ *
+ * Return: Register value on success, or a negative errno on failure.
+ */
+static int read_ear(struct spi_nor *nor, struct flash_info *info)
+{
+ int ret;
+ u8 val;
+ u8 code;
+
+ /* This is actually Spansion */
+ if (JEDEC_MFR(info) == CFI_MFR_AMD)
+ code = SPINOR_OP_BRRD;
+ /* This is actually Micron */
+ else if (JEDEC_MFR(info) == CFI_MFR_ST ||
+ JEDEC_MFR(info) == CFI_MFR_MACRONIX ||
+ JEDEC_MFR(info) == SNOR_MFR_ISSI)
+ code = SPINOR_OP_RDEAR;
+ else
+ return -EINVAL;
+
+ ret = nor->read_reg(nor, code, &val, 1);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
static int s3an_sr_ready(struct spi_nor *nor)
{
int ret;
@@ -622,15 +695,81 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
}
/*
+ * Update Extended Address/bank selection Register.
+ * Call with flash->lock locked.
+ */
+static int write_ear(struct spi_nor *nor, u32 addr)
+{
+ u8 code;
+ u8 ear;
+ int ret;
+ struct mtd_info *mtd = &nor->mtd;
+
+ /* Wait until finished previous write command. */
+ if (spi_nor_wait_till_ready(nor))
+ return 1;
+
+ if (mtd->size <= (0x1000000) << nor->shift)
+ return 0;
+
+ addr = addr % (u32) mtd->size;
+ ear = addr >> 24;
+
+ if ((!nor->isstacked) && (ear == nor->curbank))
+ return 0;
+
+ if (nor->isstacked && (mtd->size <= 0x2000000))
+ return 0;
+
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ code = SPINOR_OP_BRWR;
+ } else if (nor->jedec_id == CFI_MFR_ST ||
+ nor->jedec_id == CFI_MFR_MACRONIX ||
+ nor->jedec_id == SNOR_MFR_ISSI) {
+ write_enable(nor);
+ code = SPINOR_OP_WREAR;
+ } else {
+ /* Avoid issuing an undefined opcode for unknown vendors */
+ return -EINVAL;
+ }
+ nor->cmd_buf[0] = ear;
+
+ ret = nor->write_reg(nor, code, nor->cmd_buf, 1);
+ if (ret < 0)
+ return ret;
+
+ nor->curbank = ear;
+
+ return 0;
+}
+
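+/*
+ * Worked example for write_ear(): with a 32 MiB flash in 3-byte
+ * address mode, an access at offset 0x1200000 lands in the second
+ * 16 MiB bank, so the register is programmed with
+ *
+ * ear = (0x1200000 % (u32)mtd->size) >> 24;  => 0x01
+ *
+ * The nor->curbank check above skips the register write when
+ * consecutive accesses stay within the current bank.
+ */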
+/*
* Erase the whole flash memory
*
* Returns 0 if successful, non-zero otherwise.
*/
static int erase_chip(struct spi_nor *nor)
{
+ int ret;
+
dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
- return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (nor->isstacked)
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+
+ ret = nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ if (ret)
+ return ret;
+
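+ /*
+ * Stacked parts expose two dies behind one controller; the chip
+ * erase opcode only reaches the currently selected die, so the
+ * command is issued twice: lower die first (U_PAGE cleared above),
+ * then the upper die once the first erase has completed.
+ */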
+ if (nor->isstacked) {
+ /* Wait until previous write command finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+
+ ret = nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+ }
+ return ret;
 }
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
@@ -982,7 +1121,7 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
+ u32 addr, len, offset;
uint32_t rem;
int ret;
@@ -1034,9 +1173,35 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
/* "sector"-at-a-time erase */
} else if (spi_nor_has_uniform_erase(nor)) {
while (len) {
write_enable(nor);
+ offset = addr;
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ if (nor->addr_width == 3) {
+ /* Update Extended Address Register */
+ ret = write_ear(nor, offset);
+ if (ret)
+ goto erase_err;
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
- ret = spi_nor_erase_sector(nor, addr);
+ write_enable(nor);
+
+ ret = spi_nor_erase_sector(nor, offset);
if (ret)
goto erase_err;
@@ -1063,6 +1228,118 @@ erase_err:
return ret;
}
+static inline uint16_t min_lockable_sectors(struct spi_nor *nor,
+ uint16_t n_sectors)
+{
+ uint16_t lock_granularity;
+
+ /*
+ * Revisit - SST (not used by us) has the same JEDEC ID as Micron, but
+ * its protected-area table is similar to that of Spansion.
+ */
+ lock_granularity = max(1, n_sectors / M25P_MAX_LOCKABLE_SECTORS);
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lock_granularity = 1;
+
+ return lock_granularity;
+}
+
+static inline uint32_t get_protected_area_start(struct spi_nor *nor,
+ uint8_t lock_bits)
+{
+ u16 n_sectors;
+ u32 sector_size;
+ uint64_t mtd_size;
+ struct mtd_info *mtd = &nor->mtd;
+
+ n_sectors = nor->n_sectors;
+ sector_size = nor->sector_size;
+ mtd_size = mtd->size;
+
+ if (nor->isparallel) {
+ sector_size = (nor->sector_size >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+ if (nor->isstacked) {
+ n_sectors = (nor->n_sectors >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+
+ return mtd_size - (1 << (lock_bits - 1)) *
+ min_lockable_sectors(nor, n_sectors) * sector_size;
+}
+
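+/*
+ * Worked example for get_protected_area_start(): a 16 MiB part with
+ * 256 64 KiB sectors and a lock granularity of one sector gives, for
+ * lock_bits = 3, a protected region starting at
+ * 16 MiB - (1 << 2) * 64 KiB, i.e. the top 256 KiB of the device.
+ * Each increment of lock_bits doubles the protected area, matching
+ * the usual BP-bit encoding.
+ */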
+static uint8_t min_protected_area_including_offset(struct spi_nor *nor,
+ uint32_t offset)
+{
+ uint8_t lock_bits, lockbits_limit;
+
+ /*
+ * Revisit - SST (not used by us) has the same JEDEC ID as Micron, but
+ * its protected-area table is similar to that of Spansion.
+ * Micron has 4 block protect bits.
+ */
+ lockbits_limit = 7;
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lockbits_limit = 15;
+
+ for (lock_bits = 1; lock_bits < lockbits_limit; lock_bits++) {
+ if (offset >= get_protected_area_start(nor, lock_bits))
+ break;
+ }
+ return lock_bits;
+}
+
+static int write_sr_modify_protection(struct spi_nor *nor, uint8_t status,
+ uint8_t lock_bits)
+{
+ uint8_t status_new, bp_mask;
+ u8 val[2];
+
+ status_new = status & ~SR_BP_BIT_MASK;
+ bp_mask = (lock_bits << SR_BP_BIT_OFFSET) & SR_BP_BIT_MASK;
+
+ /* Micron */
+ if (nor->jedec_id == CFI_MFR_ST) {
+ /* To support chips with more than 896 sectors (56MB) */
+ status_new &= ~SR_BP3;
+
+ /* Protected area starts from top */
+ status_new &= ~SR_BP_TB;
+
+ if (lock_bits > 7)
+ bp_mask |= SR_BP3;
+ }
+
+ if (nor->is_lock)
+ status_new |= bp_mask;
+
+ write_enable(nor);
+
+ /* For spansion flashes */
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ val[1] = read_cr(nor);
+ val[0] = status_new;
+ if (write_sr_cr(nor, val) < 0)
+ return 1;
+ } else {
+ if (write_sr(nor, status_new) < 0)
+ return 1;
+ }
+ return 0;
+}
+
+static uint8_t bp_bits_from_sr(struct spi_nor *nor, uint8_t status)
+{
+ uint8_t ret;
+
+ ret = (((status) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET);
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ ret |= ((status & SR_BP3) >> (SR_BP_BIT_OFFSET + 1));
+
+ return ret;
+}
+
/* Write status register and ensure bits in mask match written values */
static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
{
@@ -1353,13 +1630,42 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ uint8_t status;
+ uint8_t lock_bits;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
+ if (nor->isstacked == 1) {
+ if (ofs >= (mtd->size / 2)) {
+ ofs = ofs - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+
 ret = nor->flash_lock(nor, ofs, len);
+ if (ret)
+ goto err;
+
+ /* Wait until the previous command has finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status = read_sr(nor);
+ lock_bits = min_protected_area_including_offset(nor, ofs);
+
+ /* Only modify protection if it will not unlock other areas */
+ if (lock_bits > bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 1;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "trying to lock an already locked area\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
return ret;
}
@@ -1368,13 +1674,42 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ uint8_t status;
+ uint8_t lock_bits;
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
+ if (nor->isstacked == 1) {
+ if (ofs >= (mtd->size / 2)) {
+ ofs = ofs - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+
 ret = nor->flash_unlock(nor, ofs, len);
+ if (ret)
+ goto err;
+
+ /* Wait until the previous command has finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ status = read_sr(nor);
+ lock_bits = min_protected_area_including_offset(nor, ofs+len) - 1;
+
+ /* Only modify protection if it will not lock other areas */
+ if (lock_bits < bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 0;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "trying to unlock an already unlocked area\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
@@ -1901,6 +2236,28 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp080d", INFO(0x9d7014, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp016d", INFO(0x9d7015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp032d", INFO(0x9d6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp032d", INFO(0x9d7016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp064a", INFO(0x9d6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp064a", INFO(0x9d7017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp128f", INFO(0x9d6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp128f", INFO(0x9d7018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25lp256d", INFO(0x9d6019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp256d", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
+ { "is25lp512m", INFO(0x9d601a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp512m", INFO(0x9d701a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1953,6 +2310,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66u1g45g", INFO(0xc2253b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
/* Micron <--> ST Micro */
@@ -1961,15 +2319,18 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q256a", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR| SPI_NOR_HAS_LOCK) },
+ { "n25q256a13", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
{ "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
- { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q512a13", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR | SPI_NOR_HAS_LOCK) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
+ { "mt25ul02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | NO_CHIP_ERASE) },
/* Micron */
{
@@ -1992,17 +2353,18 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | USE_CLSR) },
{ "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
- { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
- { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
- { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
- { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, SPI_NOR_HAS_LOCK) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, SPI_NOR_HAS_LOCK) },
+ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_QUAD_READ | USE_CLSR) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) },
{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
@@ -2020,6 +2382,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "sst26wf016B", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K | SST_GLOBAL_PROT_UNLK) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2118,7 +2481,7 @@ static const struct flash_info spi_nor_ids[] = {
},
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
@@ -2171,21 +2534,71 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- ssize_t ret;
-
+ int ret;
+ u32 offset = from;
+ u32 stack_shift = 0;
+ u32 read_len = 0;
+ u32 rem_bank_len = 0;
+ u8 bank;
+ u8 is_ofst_odd = 0;
+ loff_t addr = 0;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+ if ((nor->isparallel) && (offset & 1)) {
+ /* We can hit this case when we use file system like ubifs */
+ from = (loff_t)(from - 1);
+ len = (size_t)(len + 1);
+ is_ofst_odd = 1;
+ }
+
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
if (ret)
return ret;
while (len) {
- loff_t addr = from;
+ if (nor->addr_width == 3) {
+ bank = (u32)from / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - from;
+ }
+ offset = from;
+
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
+
+ /* Die cross-over accesses are not handled */
+ if (nor->addr_width == 4) {
+ rem_bank_len = (mtd->size >> stack_shift) -
+ (offset << nor->shift);
+ }
+ if (nor->addr_width == 3)
+ write_ear(nor, offset);
+ if (len < rem_bank_len)
+ read_len = len;
+ else
+ read_len = rem_bank_len;
+
+ /* Wait till previous write/erase is done. */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto read_err;
if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
- addr = spi_nor_s3an_addr_convert(nor, addr);
+ addr = spi_nor_s3an_addr_convert(nor, offset);
- ret = nor->read(nor, addr, len, buf);
+ ret = nor->read(nor, offset, read_len, buf);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2195,7 +2608,12 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
goto read_err;
WARN_ON(ret > len);
- *retlen += ret;
+ if (is_ofst_odd == 1) {
+ memcpy(buf, (buf + 1), (len - 1));
+ *retlen += (ret - 1);
+ } else {
+ *retlen += ret;
+ }
buf += ret;
from += ret;
len -= ret;
@@ -2297,41 +2715,93 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
+ u32 offset, stack_shift = 0;
+ u8 bank = 0;
+ u32 rem_bank_len = 0;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+ /*
+ * Cannot write to odd offset in parallel mode,
+ * so write 2 bytes first
+ */
+ if ((nor->isparallel) && (to & 1)) {
+ u8 two[2] = {0xff, buf[0]};
+ size_t local_retlen;
+
+ ret = spi_nor_write(mtd, to & ~1, 2, &local_retlen, two);
+ if (ret < 0)
+ return ret;
+
+ *retlen += 1; /* We've written only one actual byte */
+ ++buf;
+ --len;
+ ++to;
+ }
+
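+ /*
+ * In dual parallel mode each byte is striped across the two
+ * devices, so programming must start on an even offset. The 0xff
+ * pad byte above is harmless: NOR programming can only clear bits,
+ * so writing 0xff leaves the byte already in flash untouched.
+ */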
ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
if (ret)
return ret;
-
for (i = 0; i < len; ) {
ssize_t written;
loff_t addr = to + i;
- /*
- * If page_size is a power of two, the offset can be quickly
- * calculated with an AND operation. On the other cases we
- * need to do a modulus operation (more expensive).
- * Power of two numbers have only one bit set and we can use
- * the instruction hweight32 to detect if we need to do a
- * modulus (do_div()) or not.
- */
- if (hweight32(nor->page_size) == 1) {
- page_offset = addr & (nor->page_size - 1);
- } else {
- uint64_t aux = addr;
+ if (nor->addr_width == 3) {
+ bank = (u32)to / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - to;
+ }
+
+ page_offset = (to + i) & (nor->page_size - 1);
+
+ offset = to + i;
- page_offset = do_div(aux, nor->page_size);
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
}
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
+
+ /* Die cross-over accesses are not handled */
+ if (nor->addr_width == 4)
+ rem_bank_len = (mtd->size >> stack_shift) - offset;
+ if (nor->addr_width == 3)
+ write_ear(nor, offset);
+ if (nor->isstacked == 1) {
+ if (len <= rem_bank_len) {
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+ } else {
+ /*
+ * the size of data remaining
+ * on the first page
+ */
+ page_remain = rem_bank_len;
+ }
+ } else {
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
if (nor->flags & SNOR_F_S3AN_ADDR_DEFAULT)
addr = spi_nor_s3an_addr_convert(nor, addr);
write_enable(nor);
- ret = nor->write(nor, addr, page_remain, buf + i);
+
+ ret = nor->write(nor, offset, page_remain, buf + i);
if (ret < 0)
goto write_err;
written = ret;
@@ -2920,6 +3390,9 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
params->size >>= 3; /* Convert to bytes. */
+ if (params->size > 0x1000000 && nor->addr_width == 3)
+ return -EINVAL;
+
/* Fast Read settings. */
for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
@@ -3708,9 +4181,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
}
/* Page Program settings. */
- params->hwcaps.mask |= SNOR_HWCAPS_PP;
- spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
- SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ if (nor->spi->mode & SPI_TX_QUAD) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
+ } else {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ }
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
@@ -3739,6 +4218,7 @@ static int spi_nor_init_params(struct spi_nor *nor,
SNOR_HWCAPS_PP_QUAD)) {
switch (JEDEC_MFR(info)) {
case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_ISSI:
params->quad_enable = macronix_quad_enable;
break;
@@ -4075,12 +4555,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
struct spi_nor_flash_parameter params;
- const struct flash_info *info = NULL;
+ struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
struct device_node *np = spi_nor_get_flash_node(nor);
+ struct device_node *np_spi;
int ret;
int i;
+ u32 is_dual;
ret = spi_nor_check(nor);
if (ret)
@@ -4092,10 +4574,10 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->write_proto = SNOR_PROTO_1_1_1;
if (name)
- info = spi_nor_match_id(name);
+ info = (struct flash_info *)spi_nor_match_id(name);
/* Try to auto-detect if chip name wasn't specified or not found */
if (!info)
- info = spi_nor_read_id(nor);
+ info = (struct flash_info *)spi_nor_read_id(nor);
if (IS_ERR_OR_NULL(info))
return -ENOENT;
@@ -4119,7 +4601,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
*/
dev_warn(dev, "found %s, expected %s\n",
jinfo->name, info->name);
- info = jinfo;
+ info = (struct flash_info *)jinfo;
}
}
@@ -4150,6 +4632,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
+ /*
+ * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power up
+ * with the software protection bits set
+ */
+
+ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(info) == SNOR_MFR_SST ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ if (info->flags & SST_GLOBAL_PROT_UNLK) {
+ write_enable(nor);
+ /* Unlock global write protection bits */
+ nor->write_reg(nor, GLOBAL_BLKPROT_UNLK, NULL, 0);
+ }
+ spi_nor_wait_till_ready(nor);
+ }
+
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -4159,6 +4660,73 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if ((of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) ||
+ (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynqmp-qspi-1.0") >= 0)) {
+ if (of_property_read_u32(np_spi, "is-dual",
+ &is_dual) < 0) {
+ /* Default to single if prop not defined */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ } else {
+ if (is_dual == 1) {
+ /* dual parallel */
+ nor->shift = 1;
+ info->sector_size <<= nor->shift;
+ info->page_size <<= nor->shift;
+ mtd->size <<= nor->shift;
+ nor->isparallel = 1;
+ nor->isstacked = 0;
+ nor->spi->master->flags |=
+ (SPI_MASTER_DATA_STRIPE
+ | SPI_MASTER_BOTH_CS);
+ } else {
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+#else
+ u32 is_stacked;
+ if (of_property_read_u32(np_spi,
+ "is-stacked",
+ &is_stacked) < 0) {
+ is_stacked = 0;
+ }
+ if (is_stacked) {
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+ } else {
+ /* single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ }
+#endif
+ }
+ }
+ }
+#if 0
+ pr_info("parallel %d stacked %d shift %d mtsize %d\n",
+ nor->isparallel, nor->isstacked, nor->shift, mtd->size);
+#endif
+#else
+ /* Default to single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+#endif
+
mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
@@ -4191,9 +4759,22 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & USE_CLSR)
nor->flags |= SNOR_F_USE_CLSR;
+ if (nor->shift)
+ mtd->erasesize = info->sector_size;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (nor->shift &&
+ (info->flags & SECT_4K ||
+ info->flags & SECT_4K_PMC)) {
+ mtd->erasesize = 4096 << nor->shift;
+ }
+#endif
+
if (info->flags & SPI_NOR_NO_ERASE)
mtd->flags |= MTD_NO_ERASE;
+ nor->jedec_id = info->id[0];
mtd->dev.parent = dev;
nor->page_size = params.page_size;
mtd->writebufsize = nor->page_size;
@@ -4232,19 +4813,64 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
} else if (info->addr_width) {
nor->addr_width = info->addr_width;
} else if (mtd->size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) {
+ int status;
+
+ nor->addr_width = 3;
+ set_4byte(nor, false);
+ status = read_ear(nor, info);
+ if (status < 0)
+ dev_warn(dev, "failed to read ear reg\n");
+ else
+ nor->curbank = status & EAR_SEGMENT_MASK;
+ } else {
+#endif
+ /*
+ * enable 4-byte addressing
+ * if the device exceeds 16MiB
+ */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+ info->flags & SPI_NOR_4B_OPCODES)
+ spi_nor_set_4byte_opcodes(nor);
+ else {
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi,
+ "compatible",
+ "xlnx,xps-spi-2.00.a") >= 0) {
+ nor->addr_width = 3;
+ set_4byte(nor, false);
+ } else {
+ set_4byte(nor, true);
+ if (nor->isstacked) {
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ set_4byte(nor, true);
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ }
+#ifdef CONFIG_OF
+ }
+#endif
} else {
nor->addr_width = 3;
}
- if (info->flags & SPI_NOR_4B_OPCODES ||
- (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
- nor->flags |= SNOR_F_4B_OPCODES;
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") < 0) {
+ if (info->flags & SPI_NOR_4B_OPCODES ||
+ (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
+ nor->flags |= SNOR_F_4B_OPCODES;
- if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
- !(nor->flags & SNOR_F_HAS_4BAIT))
- spi_nor_set_4byte_opcodes(nor);
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+ }
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_err(dev, "address width is too large: %u\n",
@@ -4286,6 +4912,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
+void spi_nor_shutdown(struct spi_nor *nor)
+{
+ if (nor->addr_width == 3 &&
+ (nor->mtd.size >> nor->shift) > 0x1000000)
+ write_ear(nor, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_shutdown);
+
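+/*
+ * The intent is for controller drivers to call spi_nor_shutdown()
+ * from their ->shutdown() hook so that a part larger than 16 MiB
+ * left in 3-byte address mode points back at bank 0, keeping boot
+ * firmware that only issues bank-0 reads working across a reboot.
+ */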
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 7bce1ae7abeb..11bdf6d626e0 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -50,6 +50,10 @@ enum xcan_reg {
XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
/* only on CAN FD cores */
+ XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
+ * Prescaler
+ */
+ XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
@@ -64,6 +68,7 @@ enum xcan_reg {
#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
+#define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_CANFD_FRAME_SIZE 0x48
#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
@@ -120,8 +125,12 @@ enum xcan_reg {
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
+#define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
+#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
+#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
+#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -135,6 +144,7 @@ enum xcan_reg {
/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN 8
+#define XCANFD_DW_BYTES 4
#define XCAN_TIMEOUT (1 * HZ)
/* TX-FIFO-empty interrupt available */
@@ -151,7 +161,15 @@ enum xcan_reg {
#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
#define XCAN_FLAG_CANFD_2 0x0020
+enum xcan_ip_type {
+ XAXI_CAN = 0,
+ XZYNQ_CANPS,
+ XAXI_CANFD,
+ XAXI_CANFD_2_0,
+};
+
struct xcan_devtype_data {
+ enum xcan_ip_type cantype;
unsigned int flags;
const struct can_bittiming_const *bittiming_const;
const char *bus_clk_name;
@@ -175,6 +193,8 @@ struct xcan_devtype_data {
* @bus_clk: Pointer to struct clk
* @can_clk: Pointer to struct clk
* @devtype: Device type specific constants
+ * @cfd: Copy of the canfd_frame queued for transmission
+ * @is_canfd: True when the queued frame is a CAN FD frame
*/
struct xcan_priv {
struct can_priv can;
@@ -185,13 +205,15 @@ struct xcan_priv {
struct napi_struct napi;
u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val);
+ u32 val);
struct device *dev;
void __iomem *reg_base;
unsigned long irq_flags;
struct clk *bus_clk;
struct clk *can_clk;
struct xcan_devtype_data devtype;
+ struct canfd_frame cfd;
+ bool is_canfd;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -207,6 +229,7 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
@@ -219,6 +242,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.brp_inc = 1,
};
+/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
+static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
@@ -231,6 +268,19 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.brp_inc = 1,
};
+/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
+static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
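+/*
+ * A rough sanity check on these tables: the data-phase bit rate works
+ * out to can_clk / (brp * (1 + prop_seg + phase_seg1 + phase_seg2)).
+ * With an 80 MHz can_clk, brp = 2 and 1 + 14 + 5 time quanta per bit,
+ * for instance, the data phase runs at 2 Mbit/s. The figures are
+ * illustrative; the real values come from the bittiming computation
+ * in the CAN core.
+ */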
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -240,7 +290,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
 * Write data to the particular CAN register
*/
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val)
+ u32 val)
{
iowrite32(val, priv->reg_base + reg);
}
@@ -267,7 +317,7 @@ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
 * Write data to the particular CAN register
*/
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val)
+ u32 val)
{
iowrite32be(val, priv->reg_base + reg);
}
@@ -345,6 +395,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -374,9 +425,27 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
+ if (priv->devtype.cantype == XAXI_CANFD ||
+ priv->devtype.cantype == XAXI_CANFD_2_0) {
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
+ btr0 = dbt->brp - 1;
+
+ /* Setting Time Segment 1 in BTR Register */
+ btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+
+ /* Setting Time Segment 2 in BTR Register */
+ btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
+
+ /* Setting Synchronous jump width in BTR Register */
+ btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
+
+ priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
+ priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
+ }
+
netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
- priv->read_reg(priv, XCAN_BRPR_OFFSET),
- priv->read_reg(priv, XCAN_BTR_OFFSET));
+ priv->read_reg(priv, XCAN_BRPR_OFFSET),
+ priv->read_reg(priv, XCAN_BTR_OFFSET));
return 0;
}
@@ -394,9 +463,8 @@ static int xcan_set_bittiming(struct net_device *ndev)
static int xcan_chip_start(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 reg_msr, reg_sr_mask;
+ u32 reg_msr;
int err;
- unsigned long timeout;
u32 ier;
/* Check if it is in reset mode */
@@ -422,10 +490,8 @@ static int xcan_chip_start(struct net_device *ndev)
/* Check whether it is loopback mode or normal mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
reg_msr = XCAN_MSR_LBACK_MASK;
- reg_sr_mask = XCAN_SR_LBACK_MASK;
} else {
reg_msr = 0x0;
- reg_sr_mask = XCAN_SR_NORMAL_MASK;
}
/* enable the first extended filter, if any, as cores with extended
@@ -437,16 +503,8 @@ static int xcan_chip_start(struct net_device *ndev)
priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
- timeout = jiffies + XCAN_TIMEOUT;
- while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
- if (time_after(jiffies, timeout)) {
- netdev_warn(ndev,
- "timed out for correct mode\n");
- return -ETIMEDOUT;
- }
- }
 netdev_dbg(ndev, "status:0x%08x\n",
- priv->read_reg(priv, XCAN_SR_OFFSET));
+ priv->read_reg(priv, XCAN_SR_OFFSET));
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -485,14 +543,15 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
/**
* xcan_write_frame - Write a frame to HW
- * @skb: sk_buff pointer that contains data to be Txed
+ * @priv: Driver private data structure
+ * @cf: canfd_frame pointer that contains data to be Txed
* @frame_offset: Register offset to write the frame to
*/
-static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
+static void xcan_write_frame(struct xcan_priv *priv, struct canfd_frame *cf,
int frame_offset)
{
u32 id, dlc, data[2] = {0, 0};
- struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 ramoff, dwindex = 0, i;
/* Watch carefully on the bit sequence */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -500,7 +559,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
XCAN_IDR_ID2_MASK;
id |= (((cf->can_id & CAN_EFF_MASK) >>
- (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
+ (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
 /* The substitute remote TX request bit should be "1"
@@ -521,31 +580,51 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
id |= XCAN_IDR_SRR_MASK;
}
- dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
-
- if (cf->can_dlc > 0)
- data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
- if (cf->can_dlc > 4)
- data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+ dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
+ if (priv->is_canfd) {
+ if (cf->flags & CANFD_BRS)
+ dlc |= XCAN_DLCR_BRS_MASK;
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
*/
priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
- data[0]);
- /* If the CAN frame is Standard/Extended frame this
- * write triggers transmission (not on CAN FD)
- */
- priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
- data[1]);
+ if (priv->devtype.cantype == XAXI_CANFD ||
+ priv->devtype.cantype == XAXI_CANFD_2_0) {
+ for (i = 0; i < cf->len; i += 4) {
+ ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
+ (dwindex * XCANFD_DW_BYTES);
+ priv->write_reg(priv, ramoff,
+ be32_to_cpup((__be32 *)(cf->data + i)));
+ dwindex++;
+ }
+ } else {
+ if (cf->len > 0)
+ data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
+ if (cf->len > 4)
+ data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ priv->write_reg(priv,
+ XCAN_FRAME_DW1_OFFSET(frame_offset),
+ data[0]);
+ /* If the CAN frame is Standard/Extended frame this
+ * write triggers transmission (not on CAN FD)
+ */
+ priv->write_reg(priv,
+ XCAN_FRAME_DW2_OFFSET(frame_offset),
+ data[1]);
+ }
}
}
/**
* xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
*
* Return: 0 on success, -ENOSPC if FIFO is full.
*/
@@ -554,6 +633,9 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
unsigned long flags;
+ priv->cfd = *((struct canfd_frame *)skb->data);
+ priv->is_canfd = can_is_canfd_skb(skb);
+
/* Check if the TX buffer is full */
if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
XCAN_SR_TXFLL_MASK))
@@ -565,7 +647,7 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head++;
- xcan_write_frame(priv, skb, XCAN_TXFIFO_OFFSET);
+ xcan_write_frame(priv, &priv->cfd, XCAN_TXFIFO_OFFSET);
/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
if (priv->tx_max > 1)
@@ -582,6 +664,8 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
/**
* xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
*
* Return: 0 on success, -ENOSPC if there is no space
*/
@@ -590,6 +674,9 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
unsigned long flags;
+ priv->cfd = *((struct canfd_frame *)skb->data);
+ priv->is_canfd = can_is_canfd_skb(skb);
+
if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
BIT(XCAN_TX_MAILBOX_IDX)))
return -ENOSPC;
@@ -600,7 +687,7 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head++;
- xcan_write_frame(priv, skb,
+ xcan_write_frame(priv, &priv->cfd,
XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
/* Mark buffer as ready for transmit */
@@ -714,6 +801,88 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
}
/**
+ * xcanfd_rx - Is called from CAN ISR to complete the received
+ * frame processing
+ * @ndev: Pointer to net_device structure
+ * @frame_base: Register offset to the frame to be read
+ *
+ * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
+ * does minimal processing and invokes "netif_receive_skb" to complete further
+ * processing.
+ * Return: 1 on success and 0 on failure.
+ */
+static int xcanfd_rx(struct net_device *ndev, int frame_base)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
+
+ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
+ dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
+ if (dlc & XCAN_DLCR_EDL_MASK)
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ /* Change Xilinx CANFD data length format to socketCAN data
+ * format
+ */
+ if (dlc & XCAN_DLCR_EDL_MASK)
+ cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
+ XCAN_DLCR_DLC_SHIFT);
+ else
+ cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
+ XCAN_DLCR_DLC_SHIFT);
+
+ /* Change Xilinx CAN ID format to socketCAN ID format */
+ if (id_xcan & XCAN_IDR_IDE_MASK) {
+ /* The received frame is an Extended format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
+ cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
+ XCAN_IDR_ID2_SHIFT;
+ cf->can_id |= CAN_EFF_FLAG;
+ if (id_xcan & XCAN_IDR_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ /* The received frame is a standard format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
+ XCAN_IDR_ID1_SHIFT;
+ if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
+ XCAN_IDR_SRR_MASK))
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Check whether the received frame is FD or not */
+ if (dlc & XCAN_DLCR_EDL_MASK) {
+ for (i = 0; i < cf->len; i += 4) {
+ dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
+ (dwindex * XCANFD_DW_BYTES);
+ data[0] = priv->read_reg(priv, dw_offset);
+ *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
+ dwindex++;
+ }
+ } else {
+ for (i = 0; i < cf->len; i += 4) {
+ dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
+ data[0] = priv->read_reg(priv, dw_offset + i);
+ *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
+ }
+ }
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
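+/*
+ * For reference, can_dlc2len() expands the CAN FD DLC codes 9..15 to
+ * 12, 16, 20, 24, 32, 48 and 64 data bytes respectively; codes 0..8
+ * map 1:1. This is why the EDL path above may loop over up to sixteen
+ * 4-byte data words while the classic path reads at most 8 bytes.
+ */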
+/**
* xcan_current_error_state - Get current error state from HW
* @ndev: Pointer to net_device structure
*
@@ -936,7 +1105,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
}
netdev_dbg(ndev, "%s: error status register:0x%x\n",
- __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
+ __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
/**
@@ -962,6 +1131,7 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
/**
* xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
+ * @priv: Driver private data structure
*
* Return: Register offset of the next frame in RX FIFO.
*/
@@ -970,7 +1140,7 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
int offset;
if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
- u32 fsr;
+ u32 fsr, mask;
/* clear RXOK before the is-empty check so that any newly
* received frame will reassert it without a race
@@ -980,13 +1150,20 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
/* check if RX FIFO is empty */
- if (!(fsr & XCAN_FSR_FL_MASK))
+ if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
+ mask = XCAN_2_FSR_FL_MASK;
+ else
+ mask = XCAN_FSR_FL_MASK;
+
+ if (!(fsr & mask))
return -ENOENT;
if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
- offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ offset =
+ XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
else
- offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ offset =
+ XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
} else {
/* check if RX FIFO is empty */
@@ -1021,7 +1198,10 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
(work_done < quota)) {
- work_done += xcan_rx(ndev, frame_offset);
+ if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
+ work_done += xcanfd_rx(ndev, frame_offset);
+ else
+ work_done += xcan_rx(ndev, frame_offset);
if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
/* increment read index */
@@ -1096,8 +1276,10 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
* via TXFEMP handling as we read TXFEMP *after* TXOK
* clear to satisfy (1).
*/
- while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ while ((isr & XCAN_IXR_TXOK_MASK) &&
+ !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_TXOK_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
@@ -1210,12 +1392,12 @@ static int xcan_open(struct net_device *ndev)
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
- ndev->name, ndev);
+ ndev->name, ndev);
if (ret < 0) {
netdev_err(ndev, "irq allocation for CAN failed\n");
goto err;
@@ -1286,7 +1468,7 @@ static int xcan_close(struct net_device *ndev)
* Return: 0 on success and failure value on error
*/
static int xcan_get_berr_counter(const struct net_device *ndev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
@@ -1294,7 +1476,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
@@ -1307,7 +1489,6 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
-
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1419,6 +1600,8 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
};
static const struct xcan_devtype_data xcan_zynq_data = {
+ .cantype = XZYNQ_CANPS,
+ .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1426,6 +1609,8 @@ static const struct xcan_devtype_data xcan_zynq_data = {
};
static const struct xcan_devtype_data xcan_axi_data = {
+ .cantype = XAXI_CAN,
+ .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1433,6 +1618,7 @@ static const struct xcan_devtype_data xcan_axi_data = {
};
static const struct xcan_devtype_data xcan_canfd_data = {
+ .cantype = XAXI_CANFD,
.flags = XCAN_FLAG_EXT_FILTERS |
XCAN_FLAG_RXMNF |
XCAN_FLAG_TX_MAILBOXES |
@@ -1444,6 +1630,7 @@ static const struct xcan_devtype_data xcan_canfd_data = {
};
static const struct xcan_devtype_data xcan_canfd2_data = {
+ .cantype = XAXI_CANFD_2_0,
.flags = XCAN_FLAG_EXT_FILTERS |
XCAN_FLAG_RXMNF |
XCAN_FLAG_TX_MAILBOXES |
@@ -1556,6 +1743,19 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
+
+ if (devtype->cantype == XAXI_CANFD)
+ priv->can.data_bittiming_const =
+ &xcan_data_bittiming_const_canfd;
+
+ if (devtype->cantype == XAXI_CANFD_2_0)
+ priv->can.data_bittiming_const =
+ &xcan_data_bittiming_const_canfd2;
+
+ if (devtype->cantype == XAXI_CANFD ||
+ devtype->cantype == XAXI_CANFD_2_0)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+
priv->reg_base = addr;
priv->tx_max = tx_max;
priv->devtype = *devtype;
@@ -1572,14 +1772,16 @@ static int xcan_probe(struct platform_device *pdev)
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(priv->can_clk)) {
- dev_err(&pdev->dev, "Device clock not found.\n");
+ if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Device clock not found.\n");
ret = PTR_ERR(priv->can_clk);
goto err_free;
}
priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
if (IS_ERR(priv->bus_clk)) {
- dev_err(&pdev->dev, "bus clock not found\n");
+ if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "bus clock not found\n");
ret = PTR_ERR(priv->bus_clk);
goto err_free;
}
@@ -1591,7 +1793,7 @@ static int xcan_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
goto err_pmdisable;
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 6ff123da6a14..5e0649bc44fd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -79,6 +79,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_PBUFRXCUT 0x0044 /* RX Partial Store and Forward */
#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
@@ -90,6 +91,9 @@
#define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_WOL 0x00B8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -155,6 +159,7 @@
#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */
#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */
#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */
+#define GEM_PCSCNTRL 0x0200 /* PCS Control */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -227,6 +232,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15
+#define MACB_PTPUNI_OFFSET 20
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
@@ -314,6 +321,11 @@
#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
#define GEM_ADDR64_SIZE 1
+/* Bitfields in PBUFRXCUT */
+#define GEM_WTRMRK_OFFSET 0 /* Watermark value offset */
+#define GEM_WTRMRK_SIZE 12
+#define GEM_ENCUTTHRU_OFFSET 31 /* Enable RX partial store and forward */
+#define GEM_ENCUTTHRU_SIZE 1
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
@@ -376,7 +388,7 @@
#define MACB_PFR_SIZE 1
#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
#define MACB_PTZ_SIZE 1
-#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */
+#define MACB_WOL_OFFSET 28 /* Enable WOL received interrupt */
#define MACB_WOL_SIZE 1
#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */
#define MACB_DRQFR_SIZE 1
@@ -455,6 +467,10 @@
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
+/* Bitfields in PCSCNTRL */
+#define GEM_PCSAUTONEG_OFFSET 12
+#define GEM_PCSAUTONEG_SIZE 1
+
/* Bitfields in DCFG1. */
#define GEM_IRQCOR_OFFSET 23
#define GEM_IRQCOR_SIZE 1
@@ -467,7 +483,6 @@
#define GEM_TX_PKT_BUFF_OFFSET 21
#define GEM_TX_PKT_BUFF_SIZE 1
-
/* Bitfields in DCFG5. */
#define GEM_TSU_OFFSET 8
#define GEM_TSU_SIZE 1
@@ -496,7 +511,11 @@
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
-#define GEM_SUBNSINCR_SIZE 16
+#define GEM_SUBNSINCRL_OFFSET 24
+#define GEM_SUBNSINCRL_SIZE 8
+#define GEM_SUBNSINCRH_OFFSET 0
+#define GEM_SUBNSINCRH_SIZE 16
+#define GEM_SUBNSINCR_SIZE 24
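+/* A sketch of how the split sub-ns increment might be packed using
+ * macb's GEM_BF() helper (subns is an assumed 24-bit value):
+ *
+ * u32 tisubn = GEM_BF(SUBNSINCRL, subns & 0xff) |
+ *              GEM_BF(SUBNSINCRH, subns >> 8);
+ *
+ * i.e. the 8 LSBs land at bit 24 and the 16 MSBs at bit 0.
+ */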
/* Bitfields in TI */
#define GEM_NSINCR_OFFSET 0
@@ -640,7 +659,10 @@
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
+#define MACB_CAPS_PCS 0x00000400
+#define MACB_CAPS_PARTIAL_STORE_FORWARD 0x00000800
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_WOL 0x00000200
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -834,6 +856,9 @@ struct gem_tx_ts {
/* limit RX checksum offload to TCP and UDP packets */
#define GEM_RX_CSUM_CHECKED_MASK 2
+/* Scaled PPM fraction */
+#define PPM_FRACTION 16
+
/* struct macb_tx_skb - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
@@ -1152,6 +1177,7 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1174,6 +1200,8 @@ struct macb {
struct gem_stats gem;
} hw_stats;
+ dma_addr_t rx_ring_tieoff_dma;
+
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
@@ -1200,6 +1228,9 @@ struct macb {
u32 wol;
+ /* holds the RX watermark value for the pbuf_rxcutthru register */
+ u16 rx_watermark;
+
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index aef87ba68aec..18493f4e6257 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -35,6 +35,8 @@
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
+#include <linux/crc32.h>
+#include <linux/inetdevice.h>
#include "macb.h"
#define MACB_RX_BUFFER_SIZE 128
@@ -74,13 +76,11 @@
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
-
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
#define MACB_PM_TIMEOUT 100 /* ms */
@@ -275,6 +275,9 @@ static void macb_set_hwaddr(struct macb *bp)
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
@@ -576,8 +579,8 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct device_node *np;
- int err = -ENXIO;
+ struct device_node *np, *mdio_np;
+ int err = -ENXIO, i;
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -594,19 +597,39 @@ static int macb_mii_init(struct macb *bp)
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->pdev->dev;
+ bp->mii_bus->parent = &bp->dev->dev;
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (np && of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification %pOF\n", np);
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ of_node_put(mdio_np);
+ err = of_mdiobus_register(bp->mii_bus, mdio_np);
+ if (err)
goto err_out_free_mdiobus;
- }
+ } else if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
- err = mdiobus_register(bp->mii_bus);
+ /* fall back to standard PHY registration if no PHY was
+ * found during DT PHY registration
+ */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev) &&
+ PTR_ERR(phydev) != -ENODEV) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
} else {
err = of_mdiobus_register(bp->mii_bus, np);
}
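The registration flow above takes a reference on the mdio child node and must hold it across of_mdiobus_register(); a toy standalone model of that get/use/put ordering (a plain counter stands in for the kernel's OF refcount, names here are illustrative only):

#include <stdio.h>

struct node { int refs; const char *name; };

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n) { n->refs--; }

static void register_mdio(const struct node *n)
{
        if (n->refs <= 0)
                printf("BUG: \"%s\" used after the last put\n", n->name);
        else
                printf("registering mdio on \"%s\"\n", n->name);
}

int main(void)
{
        struct node mdio = { 0, "mdio" };

        node_get(&mdio);        /* of_get_child_by_name() returns a reference */
        register_mdio(&mdio);   /* of_mdiobus_register() dereferences the node */
        node_put(&mdio);        /* drop the reference only after the last use */
        return 0;
}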
@@ -920,7 +943,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -959,6 +981,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -989,6 +1012,15 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static int macb_validate_hw_csum(struct sk_buff *skb)
+{
+ u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]);
+ u32 csum = ~crc32_le(~0, skb_mac_header(skb),
+ skb->len + ETH_HLEN - ETH_FCS_LEN);
+
+ return (pkt_csum != csum);
+}
+
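macb_validate_hw_csum() recomputes the frame FCS in software whenever DRFCS is left clear: the standard reflected CRC-32, complemented at both ends, compared against the four trailing bytes loaded little-endian. A self-contained sketch of the same check (bitwise CRC standing in for crc32_le(); a little-endian host is assumed for the FCS load/store):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-32 (poly 0xEDB88320), the algorithm behind crc32_le(). */
static uint32_t crc32_le_sw(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
}

int main(void)
{
        uint8_t frame[64 + 4] = { 0xff, 0xff };  /* toy frame, FCS appended */
        size_t plen = 64;
        uint32_t fcs, wire, calc;

        fcs = ~crc32_le_sw(~0u, frame, plen);
        memcpy(frame + plen, &fcs, 4);           /* FCS goes out LSB first */

        memcpy(&wire, frame + plen, 4);
        calc = ~crc32_le_sw(~0u, frame, plen);
        printf("FCS %s\n", wire == calc ? "ok" : "bad");
        return 0;
}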
static int gem_rx(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
@@ -1049,6 +1081,16 @@ static int gem_rx(struct macb_queue *queue, int budget)
bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
+
+ /* Validate MAC FCS if RX checksum offload is disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+ break;
+ }
+ }
+
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM &&
!(bp->dev->flags & IFF_PROMISC) &&
@@ -1146,6 +1188,19 @@ static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
break;
}
+ /* Validate MAC FCS if RX checksum offload is disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ return 1;
+ }
+ }
+
/* Make descriptor updates visible to hardware */
wmb();
@@ -1371,6 +1426,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
spin_lock(&bp->lock);
while (status) {
+ if (status & MACB_BIT(WOL)) {
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(WOL));
+ break;
+ }
+
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
queue_writel(queue, IDR, -1);
@@ -1621,7 +1682,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ (skb->data_len == 0))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -1716,9 +1778,11 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
struct sk_buff *nskb;
u32 fcs;
+ /* Not available for GSO and fragments */
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size ||
+ ((*skb)->data_len > 0))
return 0;
if (padlen <= 0) {
@@ -1772,7 +1836,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
- bool is_lso, is_udp = 0;
+ bool is_lso, is_udp = false;
netdev_tx_t ret = NETDEV_TX_OK;
if (macb_clear_csum(skb)) {
@@ -1934,6 +1998,12 @@ static void macb_free_consistent(struct macb *bp)
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
@@ -2023,6 +2093,14 @@ static int macb_alloc_consistent(struct macb *bp)
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Tie-off descriptor, required for the PM suspend path */
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+
return 0;
out_err:
@@ -2030,6 +2108,19 @@ out_err:
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+ if (bp->num_queues > 1) {
+ /* Setup a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ d->ctrl = 0;
+ }
+}
+
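macb_init_tieoff() parks every unused RX queue on a single descriptor whose address word has both WRAP and USED set, so the controller sees no free slot and loops on that one entry. A standalone model of the descriptor (USED = bit 0 and WRAP = bit 1 are assumptions matching the usual GEM layout of MACB_BIT(RX_USED)/MACB_BIT(RX_WRAP)):

#include <stdint.h>
#include <stdio.h>

#define RX_USED (1u << 0)
#define RX_WRAP (1u << 1)

struct dma_desc { uint32_t addr; uint32_t ctrl; };

int main(void)
{
        struct dma_desc tieoff = {
                .addr = RX_WRAP | RX_USED, /* no buffer, no free slot, wraps to itself */
                .ctrl = 0,
        };
        printf("tie-off addr word = %#x\n", tieoff.addr); /* prints 0x3 */
        return 0;
}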
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2052,6 +2143,7 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ macb_init_tieoff(bp);
}
@@ -2070,6 +2162,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -2092,6 +2186,10 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, TSR, -1);
macb_writel(bp, RSR, -1);
+ /* Disable RX partial store and forward and reset watermark value */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ gem_writel(bp, PBUFRXCUT, 0xFFF);
+
/* Disable all interrupts */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, IDR, -1);
@@ -2231,7 +2329,11 @@ static void macb_init_hw(struct macb *bp)
config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
- config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
+ /* Do not discard Rx FCS if RX checksum offload is disabled */
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
else
@@ -2247,13 +2349,24 @@ static void macb_init_hw(struct macb *bp)
if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
gem_writel(bp, JML, bp->jumbo_max_len);
bp->speed = SPEED_10;
- bp->duplex = DUPLEX_HALF;
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ bp->duplex = DUPLEX_FULL;
+ else
+ bp->duplex = DUPLEX_HALF;
bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
+ /* Enable RX partial store and forward and set watermark */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ gem_writel(bp, PBUFRXCUT,
+ (gem_readl(bp, PBUFRXCUT) &
+ GEM_BF(WTRMRK, bp->rx_watermark)) |
+ GEM_BIT(ENCUTTHRU));
+ }
+
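The PBUFRXCUT write above packs the watermark into the register with a GEM_BF()-style field macro before setting the cut-through enable bit. A standalone sketch of that packing, with the WTRMRK placement (bits [11:0]) and the ENCUTTHRU position (bit 31) assumed for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mask a value to "sz" bits and shift it to its field offset. */
#define FIELD(val, off, sz) (((uint32_t)(val) & ((1u << (sz)) - 1)) << (off))

#define WTRMRK(v)  FIELD(v, 0, 12)  /* RX watermark, 12 bits */
#define ENCUTTHRU  (1u << 31)       /* enable partial store and forward */

int main(void)
{
        uint16_t rx_watermark = 0x800;
        uint32_t pbufrxcut = WTRMRK(rx_watermark) | ENCUTTHRU;

        printf("PBUFRXCUT = %#010x\n", pbufrxcut); /* prints 0x80000800 */
        return 0;
}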
/* Initialize TX and RX buffers */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
@@ -2274,8 +2387,14 @@ static void macb_init_hw(struct macb *bp)
MACB_BIT(HRESP));
}
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
/* Enable TX and RX */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE) |
+ MACB_BIT(PTPUNI));
}
/* The hash address register is 64 bits long and takes up two
@@ -2411,6 +2530,10 @@ static int macb_open(struct net_device *dev)
if (err < 0)
goto pm_exit;
+ err = pm_runtime_get_sync(&bp->pdev->dev);
+ if (err < 0)
+ return err;
+
/* carrier starts down */
netif_carrier_off(dev);
@@ -2479,6 +2602,8 @@ static int macb_close(struct net_device *dev)
pm_runtime_put(&bp->pdev->dev);
+ pm_runtime_put(&bp->pdev->dev);
+
return 0;
}
@@ -2692,38 +2817,6 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs_buff[13] = gem_readl(bp, DMACFG);
}
-static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
-
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- wol->supported = WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
-}
-
-static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
-
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
-
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
-
- return 0;
-}
static void macb_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
@@ -3159,8 +3252,6 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
- .get_wol = macb_get_wol,
- .set_wol = macb_set_wol,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3311,10 +3402,29 @@ static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
u32 dcfg;
+ int retval;
if (dt_conf)
bp->caps = dt_conf->caps;
+ /* Partial store and forward is enabled by default for zynqmp.
+ * Disable it if no valid watermark is given in the devicetree.
+ */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ retval = of_property_read_u16(bp->pdev->dev.of_node,
+ "rx-watermark",
+ &bp->rx_watermark);
+
+ /* Disable partial store and forward in case of error or
+ * invalid watermark value
+ */
+ if (retval || bp->rx_watermark > 0xFFF) {
+ dev_info(&bp->pdev->dev,
+ "Not enabling partial store and forward\n");
+ bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD;
+ }
+ }
+
if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
@@ -3372,18 +3482,9 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk, struct clk **tsu_clk)
{
- struct macb_platform_data *pdata;
int err;
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata) {
- *pclk = pdata->pclk;
- *hclk = pdata->hclk;
- } else {
- *pclk = devm_clk_get(&pdev->dev, "pclk");
- *hclk = devm_clk_get(&pdev->dev, "hclk");
- }
-
+ *pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR_OR_NULL(*pclk)) {
err = PTR_ERR(*pclk);
if (!err)
@@ -3393,6 +3494,7 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
}
+ *hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR_OR_NULL(*hclk)) {
err = PTR_ERR(*hclk);
if (!err)
@@ -3414,6 +3516,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
if (IS_ERR(*tsu_clk))
*tsu_clk = NULL;
err = clk_prepare_enable(*pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
@@ -3562,6 +3668,8 @@ static int macb_init(struct platform_device *pdev)
/* Checksum offload is only available on gem with packet buffer */
if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ dev->hw_features &= ~NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
@@ -3613,6 +3721,11 @@ static int macb_init(struct platform_device *pdev)
val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
macb_writel(bp, NCFGR, val);
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
return 0;
}
@@ -4017,7 +4130,9 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_PCS |
+ MACB_CAPS_PARTIAL_STORE_FORWARD | MACB_CAPS_WOL,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4071,6 +4186,7 @@ static int macb_probe(struct platform_device *pdev)
struct clk **) = macb_config->clk_init;
int (*init)(struct platform_device *) = macb_config->init;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
@@ -4142,14 +4258,12 @@ static int macb_probe(struct platform_device *pdev)
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
bp->tsu_clk = tsu_clk;
+ if (tsu_clk)
+ bp->tsu_rate = clk_get_rate(tsu_clk);
+
if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
- bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
-
spin_lock_init(&bp->lock);
/* setup capabilities */
@@ -4202,6 +4316,18 @@ static int macb_probe(struct platform_device *pdev)
macb_get_hwaddr(bp);
}
+ /* Power up the PHY if there is a GPIO reset */
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err < 0) {
+ dev_err(&pdev->dev, "broken fixed-link specification\n");
+ goto err_out_free_netdev;
+ }
+ phy_node = of_node_get(np);
+ }
+ bp->phy_node = phy_node;
+
err = of_get_phy_mode(np);
if (err < 0)
/* not found in DT, MII by default */
@@ -4212,25 +4338,28 @@ static int macb_probe(struct platform_device *pdev)
/* IP specific init */
err = init(pdev);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_put;
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_phy_put;
+ }
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_unregister_netdev;
phydev = dev->phydev;
netif_carrier_off(dev);
- err = register_netdev(dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unregister_mdio;
- }
-
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
(unsigned long)bp);
+ if (bp->caps & MACB_CAPS_WOL)
+ device_set_wakeup_capable(&bp->dev->dev, 1);
+
phy_attached_info(phydev);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
@@ -4242,13 +4371,13 @@ static int macb_probe(struct platform_device *pdev)
return 0;
-err_out_unregister_mdio:
- phy_disconnect(dev->phydev);
- mdiobus_unregister(bp->mii_bus);
+err_out_unregister_netdev:
+ unregister_netdev(dev);
+
+err_out_phy_put:
of_node_put(bp->phy_node);
if (np && of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
- mdiobus_free(bp->mii_bus);
err_out_free_netdev:
free_netdev(dev);
@@ -4310,16 +4439,50 @@ static int __maybe_unused macb_suspend(struct device *dev)
struct macb_queue *queue = bp->queues;
unsigned long flags;
unsigned int q;
+ u32 ctrl, arpipmask;
if (!netif_running(netdev))
return 0;
+ if (device_may_wakeup(&bp->dev->dev)) {
+ spin_lock_irqsave(&bp->lock, flags);
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(TE) | MACB_BIT(RE));
+ macb_writel(bp, NCR, ctrl);
+ /* Tie off RX queues */
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl |= MACB_BIT(RE);
+ macb_writel(bp, NCR, ctrl);
+ gem_writel(bp, NCFGR, gem_readl(bp, NCFGR) & ~MACB_BIT(NBC));
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+ macb_readl(bp, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, -1);
- if (bp->wol & MACB_WOL_ENABLED) {
+ /* Enable WOL (Q0 only) and disable all other interrupts */
macb_writel(bp, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ for (q = 1, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ arpipmask = cpu_to_be32p(&bp->dev->ip_ptr->ifa_list->ifa_local)
+ & 0xFFFF;
+ gem_writel(bp, WOL, MACB_BIT(ARP) | arpipmask);
+ spin_unlock_irqrestore(&bp->lock, flags);
enable_irq_wake(bp->queues[0].irq);
netif_device_detach(netdev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_disable(&queue->napi);
} else {
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
@@ -4352,6 +4515,7 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
struct macb_queue *queue = bp->queues;
+ unsigned long flags;
unsigned int q;
if (!netif_running(netdev))
@@ -4360,10 +4524,20 @@ static int __maybe_unused macb_resume(struct device *dev)
if (!device_may_wakeup(dev))
pm_runtime_force_resume(dev);
- if (bp->wol & MACB_WOL_ENABLED) {
+ if (device_may_wakeup(&bp->dev->dev)) {
+ spin_lock_irqsave(&bp->lock, flags);
macb_writel(bp, IDR, MACB_BIT(WOL));
- macb_writel(bp, WOL, 0);
+ gem_writel(bp, WOL, 0);
+ /* Clear Q0 ISR as WOL was enabled on Q0 */
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, -1);
disable_irq_wake(bp->queues[0].irq);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_enable(&queue->napi);
+ netif_carrier_on(netdev);
} else {
macb_writel(bp, NCR, MACB_BIT(MPE));
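The suspend path above arms the GEM WOL register with the low 16 bits of the interface's IPv4 address so that an ARP request for that address wakes the system. A standalone sketch of deriving the match value (the field placement inside the WOL register is an assumption here):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ip;

        inet_pton(AF_INET, "192.168.1.42", &ip);       /* network byte order */
        uint16_t arpipmask = ntohl(ip) & 0xFFFF;       /* low 16 bits: "1.42" */
        printf("WOL ARP match = 0x%04x\n", arpipmask); /* prints 0x012a */
        return 0;
}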
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 0a8aca8d3634..a273b7ead887 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -104,7 +104,10 @@ static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
* to take effect.
*/
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
- gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
+ /* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
+ gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
+ GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
+ GEM_SUBNSINCRL_SIZE)));
gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
@@ -135,7 +138,7 @@ static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
* (temp / USEC_PER_SEC) + 0.5
*/
adj += (USEC_PER_SEC >> 1);
- adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
+ adj >>= PPM_FRACTION; /* remove fractions */
adj = div_u64(adj, USEC_PER_SEC);
adj = neg_adj ? (word - adj) : (word + adj);
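gem_ptp_adjfine() treats scaled_ppm as parts per million with PPM_FRACTION (16) fractional bits, so the increment delta works out to word * |scaled_ppm| / (10^6 * 2^16). A rough standalone model of that conversion, assuming a 4 ns nominal increment held in 16.16 fixed point:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t word = 4ull << 16;     /* 4 ns nominal increment, 16.16 fixed point */
        uint64_t scaled_ppm = 65536000; /* +1000 ppm, i.e. 1000 << PPM_FRACTION */
        uint64_t adj;

        adj = word * scaled_ppm;
        adj += 1000000 / 2;             /* round to nearest, as the driver does */
        adj >>= 16;                     /* strip the PPM_FRACTION bits */
        adj /= 1000000;                 /* ppm -> absolute increment delta */

        printf("delta = %llu sub-ns units (2^-16 ns each)\n",
               (unsigned long long)adj); /* prints 262, i.e. ~4 ps per tick */
        return 0;
}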
@@ -239,6 +242,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
u32 dma_desc_ts_2, struct timespec64 *ts)
{
struct timespec64 tsu;
+ bool sec_rollover = false;
ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
@@ -256,9 +260,12 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
*/
if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
!(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
- ts->tv_sec -= GEM_DMA_SEC_TOP;
+ sec_rollover = true;
+
+ ts->tv_sec |= ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
- ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+ if (sec_rollover)
+ ts->tv_sec -= GEM_DMA_SEC_TOP;
return 0;
}
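The rollover fix above first merges the descriptor's truncated seconds field with the full TSU seconds, and only then subtracts one descriptor epoch when the TSU has wrapped past the snapshot. A standalone model of that arithmetic (a 6-bit descriptor seconds field is assumed purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define SEC_WIDTH 6
#define SEC_TOP   (1u << SEC_WIDTH)
#define SEC_MASK  (SEC_TOP - 1)

static uint64_t merge_secs(uint32_t desc_sec, uint64_t tsu_sec)
{
        /* Descriptor in the top half while the TSU already wrapped into
         * the bottom half means one full epoch must be subtracted.
         */
        int rollover = (desc_sec & (SEC_TOP >> 1)) &&
                       !(tsu_sec & (SEC_TOP >> 1));
        uint64_t sec = desc_sec | (tsu_sec & ~(uint64_t)SEC_MASK);

        if (rollover)
                sec -= SEC_TOP;
        return sec;
}

int main(void)
{
        /* Timestamp taken at second 126 (low bits 62), read back after
         * the TSU wrapped on to second 129.
         */
        printf("%llu\n", (unsigned long long)merge_secs(62, 129)); /* prints 126 */
        return 0;
}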
@@ -352,7 +359,6 @@ void gem_ptp_init(struct net_device *dev)
bp->ptp_clock_info = gem_ptp_caps_template;
/* nominal frequency and maximum adjustment in ppb */
- bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
gem_ptp_init_timer(bp);
bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
@@ -382,11 +388,14 @@ void gem_ptp_init(struct net_device *dev)
void gem_ptp_remove(struct net_device *ndev)
{
struct macb *bp = netdev_priv(ndev);
+ unsigned long flags;
if (bp->ptp_clock)
ptp_clock_unregister(bp->ptp_clock);
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
gem_ptp_clear_timer(bp);
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
GEM_PTP_TIMER_NAME);
@@ -459,7 +468,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ERANGE;
/* fall through */
case HWTSTAMP_TX_ON:
- tx_bd_control = TSTAMP_ALL_FRAMES;
+ tx_bd_control = TSTAMP_ALL_PTP_FRAMES;
break;
default:
return -ERANGE;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index af96e05c5bcd..b026fe2151a3 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || ARCH_ZYNQMP || X86 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,18 +19,33 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || ARCH_ZYNQMP
select PHYLIB
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
config XILINX_AXI_EMAC
- tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE
+ tristate "Xilinx AXI Ethernet support"
+ depends on (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
select PHYLIB
---help---
- This driver supports the 10/100/1000 Ethernet from Xilinx for the
- AXI bus interface used in Xilinx Virtex FPGAs.
+ This driver supports the Xilinx AXI 1G/2.5G, 10 Gigabit,
+ 10G/25G High Speed and USXGMII Ethernet Subsystem.
+
+config XILINX_AXI_EMAC_HWTSTAMP
+ bool "Generate hardware packet timestamps"
+ depends on XILINX_AXI_EMAC
+ select PTP_1588_CLOCK
+ default n
+ ---help---
+ Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+
+config AXIENET_HAS_MCDMA
+ bool "AXI Ethernet is configured with MCDMA"
+ depends on XILINX_AXI_EMAC
+ default n
+ ---help---
+ Select this option when the hardware is generated with AXI Ethernet configured with MCDMA.
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
@@ -40,4 +55,54 @@ config XILINX_LL_TEMAC
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
+config XILINX_TSN
+ bool "Enable Xilinx's TSN IP"
+ default n
+ ---help---
+ Enable Xilinx's TSN IP.
+
+config XILINX_TSN_PTP
+ bool "Generate hardware packet timestamps using Xilinx's TSN IP"
+ depends on XILINX_TSN
+ select PTP_1588_CLOCK
+ default y
+ ---help---
+ Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+
+config XILINX_TSN_QBV
+ bool "Support Qbv protocol in TSN"
+ depends on XILINX_TSN_PTP
+ select PTP_1588_CLOCK
+ default y
+ ---help---
+ Enables TSN Qbv protocol.
+
+config XILINX_TSN_SWITCH
+ bool "Support TSN switch"
+ depends on XILINX_TSN
+ default y
+ ---help---
+ Enable Xilinx's TSN Switch support.
+
+config XILINX_TSN_QCI
+ bool "Support Qci protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN QCI protocol.
+
+config XILINX_TSN_CB
+ bool "Support CB protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN CB protocol support.
+
+config XILINX_TSN_QBR
+ bool "Support QBR protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN QBR protocol support.
+
endif # NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
index 7d7dc1771423..636b82adb4a3 100644
--- a/drivers/net/ethernet/xilinx/Makefile
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -6,5 +6,13 @@
ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
-xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
+obj-$(CONFIG_XILINX_TSN) += xilinx_tsn_ep.o
+obj-$(CONFIG_XILINX_TSN_PTP) += xilinx_tsn_ptp_xmit.o xilinx_tsn_ptp_clock.o
+obj-$(CONFIG_XILINX_TSN_QBV) += xilinx_tsn_shaper.o
+obj-$(CONFIG_XILINX_TSN_QCI) += xilinx_tsn_qci.o
+obj-$(CONFIG_XILINX_TSN_CB) += xilinx_tsn_cb.o
+obj-$(CONFIG_XILINX_TSN_SWITCH) += xilinx_tsn_switch.o
+xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o xilinx_axienet_dma.o
obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
+obj-$(CONFIG_XILINX_TSN_QBR) += xilinx_tsn_preemption.o
+obj-$(CONFIG_AXIENET_HAS_MCDMA) += xilinx_axienet_mcdma.o
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3a6ae1f3c45d..bffd2ba2604b 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -748,7 +748,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_t *frag;
num_frag = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[0];
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -792,6 +791,8 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
+ frag = &skb_shinfo(skb)->frags[ii];
if (++lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
@@ -824,7 +825,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
- frag++;
}
cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
@@ -1276,9 +1276,11 @@ static int temac_probe(struct platform_device *pdev)
lp->temac_features = 0;
if (temac_np) {
p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
+ dev_info(&op->dev, "TX_CSUM %d\n", be32_to_cpup(p));
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
+ dev_info(&op->dev, "RX_CSUM %d\n", be32_to_cpup(p));
if (p && be32_to_cpu(*p))
lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
} else if (pdata) {
@@ -1401,14 +1403,15 @@ static int temac_probe(struct platform_device *pdev)
temac_init_mac_address(ndev, pdata->mac_addr);
}
- rc = temac_mdio_setup(lp, pdev);
- if (rc)
- dev_warn(&pdev->dev, "error registering MDIO bus\n");
-
if (temac_np) {
lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
- if (lp->phy_node)
+ if (lp->phy_node) {
dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+
+ rc = temac_mdio_setup(lp, pdev);
+ if (rc)
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ }
} else if (pdata) {
snprintf(lp->phy_name, sizeof(lp->phy_name),
PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index a4667326f745..1c90f9ecbdf2 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -67,6 +67,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
int clk_div;
int rc;
struct resource res;
+ struct device_node *np1 = of_get_parent(lp->phy_node);
/* Get MDIO bus frequency (if specified) */
bus_hz = 0;
@@ -96,7 +97,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
return -ENOMEM;
if (np) {
- of_address_to_resource(np, 0, &res);
+ of_address_to_resource(np1, 0, &res);
snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
(unsigned long long)res.start);
} else if (pdata) {
@@ -112,7 +113,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
lp->mii_bus = bus;
- rc = of_mdiobus_register(bus, np);
+ rc = of_mdiobus_register(bus, np1);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 011adae32b89..e29757f6f3e0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -3,7 +3,7 @@
* Definitions for Xilinx Axi Ethernet device driver.
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
- * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
+ * Copyright (c) 2010 - 2018 Xilinx, Inc. All rights reserved.
*/
#ifndef XILINX_AXIENET_H
@@ -13,6 +13,9 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -24,6 +27,21 @@
#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
+/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+/* DMA address width min and max range */
+#define XAE_DMA_MASK_MIN 32
+#define XAE_DMA_MASK_MAX 64
+
+/* In AXI DMA Tx and Rx queue count is same */
+#define for_each_tx_dma_queue(lp, var) \
+ for ((var) = 0; (var) < (lp)->num_tx_queues; (var)++)
+
+#define for_each_rx_dma_queue(lp, var) \
+ for ((var) = 0; (var) < (lp)->num_rx_queues; (var)++)
+
/* Configuration options */
/* Accept all incoming packets. Default: disabled (cleared) */
@@ -119,7 +137,7 @@
/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
+#define XAXIDMA_DFT_RX_THRESHOLD 1
#define XAXIDMA_DFT_RX_WAITBOUND 254
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
@@ -138,6 +156,22 @@
#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
+/* AXI Tx Timestamp Stream FIFO Register Definitions */
+#define XAXIFIFO_TXTS_ISR 0x00000000 /* Interrupt Status Register */
+#define XAXIFIFO_TXTS_TXFD 0x00000010 /* Tx Data Write Port */
+#define XAXIFIFO_TXTS_TLR 0x00000014 /* Transmit Length Register */
+#define XAXIFIFO_TXTS_RFO 0x0000001C /* Rx Fifo Occupancy */
+#define XAXIFIFO_TXTS_RDFR 0x00000018 /* Rx Fifo reset */
+#define XAXIFIFO_TXTS_RXFD 0x00000020 /* Rx Data Read Port */
+#define XAXIFIFO_TXTS_RLR 0x00000024 /* Receive Length Register */
+#define XAXIFIFO_TXTS_SRR 0x00000028 /* AXI4-Stream Reset */
+
+#define XAXIFIFO_TXTS_INT_RC_MASK 0x04000000
+#define XAXIFIFO_TXTS_RXFD_MASK 0x7FFFFFFF
+#define XAXIFIFO_TXTS_RESET_MASK 0x000000A5
+#define XAXIFIFO_TXTS_TAG_MASK 0xFFFF0000
+#define XAXIFIFO_TXTS_TAG_SHIFT 16
+
/* Axi Ethernet registers definition */
#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */
#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */
@@ -156,22 +190,19 @@
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
-#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-/* MII Mgmt Interrupt Pending register offset */
-#define XAE_MDIO_MIP_OFFSET 0x00000620
-/* MII Management Interrupt Enable register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640
-/* MII Management Interrupt Clear register offset. */
-#define XAE_MDIO_MIC_OFFSET 0x00000660
+#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
+#define XAE_RMFC_OFFSET 0x00000414 /* RX Max Frame Configuration */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
+#define XAE_TEMAC_IS_OFFSET 0x00000600 /* TEMAC Interrupt Status */
+#define XAE_TEMAC_IP_OFFSET 0x00000610 /* TEMAC Interrupt Pending Status */
+#define XAE_TEMAC_IE_OFFSET 0x00000620 /* TEMAC Interrupt Enable Status */
+#define XAE_TEMAC_IC_OFFSET 0x00000630 /* TEMAC Interrupt Clear Status */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_FMC_OFFSET 0x00000708 /* Frame Filter Control */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
@@ -232,6 +263,7 @@
#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 1 */
/* Bit masks for Axi Ethernet RCW1 register */
+#define XAE_RCW1_INBAND1588_MASK 0x00400000 /* Inband 1588 Enable */
#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
/* In-Band FCS enable (FCS not stripped) */
@@ -248,6 +280,7 @@
#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet TC register */
+#define XAE_TC_INBAND1588_MASK 0x00400000 /* Inband 1588 Enable */
#define XAE_TC_RST_MASK 0x80000000 /* Reset */
#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
/* In-Band FCS enable (FCS not generated) */
@@ -272,18 +305,7 @@
#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */
-
-/* Bit masks for Axi Ethernet PHYC register */
-#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/
-#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */
-#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */
-#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */
-#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */
-#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */
-#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */
-#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */
-#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
-#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+#define XAE_EMMC_LINKSPD_2500 0x80000000 /* Link Speed mask for 2500 Mbit */
/* Bit masks for Axi Ethernet MDIO interface MC register */
#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
@@ -301,30 +323,19 @@
#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Ready Mask */
#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */
-/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */
-#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
-
/* Bit masks for Axi Ethernet UAW1 register */
/* Station address bits [47:32]; Station address
* bits [31:0] are stored in register UAW0
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
-/* Bit masks for Axi Ethernet FMI register */
-#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
-#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
+/* Bit masks for Axi Ethernet FMC register */
+#define XAE_FMC_PM_MASK 0x80000000 /* Promis. mode enable */
+#define XAE_FMC_IND_MASK 0x00000003 /* Index Mask */
#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
-/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */
-#define XAE_PHY_TYPE_MII 0
-#define XAE_PHY_TYPE_GMII 1
-#define XAE_PHY_TYPE_RGMII_1_3 2
-#define XAE_PHY_TYPE_RGMII_2_0 3
-#define XAE_PHY_TYPE_SGMII 4
-#define XAE_PHY_TYPE_1000BASE_X 5
-
- /* Total number of entries in the hardware multicast table. */
+/* Total number of entries in the hardware multicast table. */
#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
@@ -341,12 +352,150 @@
#define DELAY_OF_ONE_MILLISEC 1000
+#define XAXIENET_NAPI_WEIGHT 64
+
+/* Definitions of 1588 PTP in Axi Ethernet IP */
+#define TX_TS_OP_NOOP 0x0
+#define TX_TS_OP_ONESTEP 0x1
+#define TX_TS_OP_TWOSTEP 0x2
+#define TX_TS_CSUM_UPDATE 0x1
+#define TX_PTP_CSUM_OFFSET 0x28
+#define TX_PTP_TS_OFFSET 0x4C
+
+/* Read/Write access to the registers */
+#ifndef out_be32
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+#define in_be32(offset) __raw_readl(offset)
+#define out_be32(offset, val) __raw_writel(val, offset)
+#endif
+#endif
+
+/* XXV MAC Register Definitions */
+#define XXV_GT_RESET_OFFSET 0x00000000
+#define XXV_TC_OFFSET 0x0000000C
+#define XXV_RCW1_OFFSET 0x00000014
+#define XXV_JUM_OFFSET 0x00000018
+#define XXV_TICKREG_OFFSET 0x00000020
+#define XXV_STATRX_BLKLCK_OFFSET 0x0000040C
+#define XXV_USXGMII_AN_OFFSET 0x000000C8
+#define XXV_USXGMII_AN_STS_OFFSET 0x00000458
+
+/* XXV MAC Register Mask Definitions */
+#define XXV_GT_RESET_MASK BIT(0)
+#define XXV_TC_TX_MASK BIT(0)
+#define XXV_RCW1_RX_MASK BIT(0)
+#define XXV_RCW1_FCS_MASK BIT(1)
+#define XXV_TC_FCS_MASK BIT(1)
+#define XXV_MIN_JUM_MASK GENMASK(7, 0)
+#define XXV_MAX_JUM_MASK GENMASK(10, 8)
+#define XXV_RX_BLKLCK_MASK BIT(0)
+#define XXV_TICKREG_STATEN_MASK BIT(0)
+#define XXV_MAC_MIN_PKT_LEN 64
+
+/* USXGMII Register Mask Definitions */
+#define USXGMII_AN_EN BIT(5)
+#define USXGMII_AN_RESET BIT(6)
+#define USXGMII_AN_RESTART BIT(7)
+#define USXGMII_EN BIT(16)
+#define USXGMII_RATE_MASK 0x0E000700
+#define USXGMII_RATE_1G 0x04000200
+#define USXGMII_RATE_2G5 0x08000400
+#define USXGMII_RATE_10M 0x0
+#define USXGMII_RATE_100M 0x02000100
+#define USXGMII_RATE_5G 0x0A000500
+#define USXGMII_RATE_10G 0x06000300
+#define USXGMII_FD BIT(28)
+#define USXGMII_LINK_STS BIT(31)
+
+/* USXGMII AN STS register mask definitions */
+#define USXGMII_AN_STS_COMP_MASK BIT(16)
+
+/* MCDMA Register Definitions */
+#define XMCDMA_CR_OFFSET 0x00
+#define XMCDMA_SR_OFFSET 0x04
+#define XMCDMA_CHEN_OFFSET 0x08
+#define XMCDMA_CHSER_OFFSET 0x0C
+#define XMCDMA_ERR_OFFSET 0x10
+#define XMCDMA_PKTDROP_OFFSET 0x14
+#define XMCDMA_TXWEIGHT0_OFFSET 0x18
+#define XMCDMA_TXWEIGHT1_OFFSET 0x1C
+#define XMCDMA_RXINT_SER_OFFSET 0x20
+#define XMCDMA_TXINT_SER_OFFSET 0x28
+
+#define XMCDMA_CHOBS1_OFFSET 0x440
+#define XMCDMA_CHOBS2_OFFSET 0x444
+#define XMCDMA_CHOBS3_OFFSET 0x448
+#define XMCDMA_CHOBS4_OFFSET 0x44C
+#define XMCDMA_CHOBS5_OFFSET 0x450
+#define XMCDMA_CHOBS6_OFFSET 0x454
+
+#define XMCDMA_CHAN_RX_OFFSET 0x500
+
+/* Per Channel Registers */
+#define XMCDMA_CHAN_CR_OFFSET(chan_id) (0x40 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_SR_OFFSET(chan_id) (0x44 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_CURDESC_OFFSET(chan_id) (0x48 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_TAILDESC_OFFSET(chan_id) (0x50 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_PKTDROP_OFFSET(chan_id) (0x58 + ((chan_id) - 1) * 0x40)
+
+#define XMCDMA_RX_OFFSET 0x500
+
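The per-channel macros below stride through 0x40-byte register banks starting at 0x40 for channel 1, with S2MM channels offset by the 0x500 RX window; a quick standalone check of the arithmetic:

#include <stdio.h>

#define CHAN_CR_OFFSET(id) (0x40 + ((id) - 1) * 0x40)
#define RX_OFFSET          0x500

int main(void)
{
        printf("ch2 MM2S CR: 0x%03x\n", CHAN_CR_OFFSET(2));             /* 0x080 */
        printf("ch2 S2MM CR: 0x%03x\n", CHAN_CR_OFFSET(2) + RX_OFFSET); /* 0x580 */
        return 0;
}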
+/* MCDMA Mask registers */
+#define XMCDMA_CR_RUNSTOP_MASK BIT(0) /* Start/stop DMA channel */
+#define XMCDMA_CR_RESET_MASK BIT(2) /* Reset DMA engine */
+
+#define XMCDMA_SR_HALTED_MASK BIT(0)
+#define XMCDMA_SR_IDLE_MASK BIT(1)
+
+#define XMCDMA_IRQ_ERRON_OTHERQ_MASK BIT(3)
+#define XMCDMA_IRQ_PKTDROP_MASK BIT(4)
+#define XMCDMA_IRQ_IOC_MASK BIT(5)
+#define XMCDMA_IRQ_DELAY_MASK BIT(6)
+#define XMCDMA_IRQ_ERR_MASK BIT(7)
+#define XMCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XMCDMA_PKTDROP_COALESCE_MASK GENMASK(15, 8)
+#define XMCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XMCDMA_DELAY_MASK GENMASK(31, 24)
+
+#define XMCDMA_CHEN_MASK GENMASK(7, 0)
+#define XMCDMA_CHID_MASK GENMASK(7, 0)
+
+#define XMCDMA_ERR_INTERNAL_MASK BIT(0)
+#define XMCDMA_ERR_SLAVE_MASK BIT(1)
+#define XMCDMA_ERR_DECODE_MASK BIT(2)
+#define XMCDMA_ERR_SG_INT_MASK BIT(4)
+#define XMCDMA_ERR_SG_SLV_MASK BIT(5)
+#define XMCDMA_ERR_SG_DEC_MASK BIT(6)
+
+#define XMCDMA_PKTDROP_CNT_MASK GENMASK(31, 0)
+
+#define XMCDMA_BD_CTRL_TXSOF_MASK 0x80000000 /* First tx packet */
+#define XMCDMA_BD_CTRL_TXEOF_MASK 0x40000000 /* Last tx packet */
+#define XMCDMA_BD_CTRL_ALL_MASK 0xC0000000 /* All control bits */
+#define XMCDMA_BD_STS_ALL_MASK 0xF0000000 /* All status bits */
+
+#define XMCDMA_COALESCE_SHIFT 16
+#define XMCDMA_DELAY_SHIFT 24
+#define XMCDMA_DFT_TX_THRESHOLD 1
+
+#define XMCDMA_TXWEIGHT_CH_MASK(chan_id) GENMASK(((chan_id) * 4 + 3), \
+ (chan_id) * 4)
+#define XMCDMA_TXWEIGHT_CH_SHIFT(chan_id) ((chan_id) * 4)
+
+/* PTP Packet length */
+#define XAE_TX_PTP_LEN 16
+#define XXV_TX_PTP_LEN 12
+
+/* Macros used when AXI DMA h/w is configured without DRE */
+#define XAE_TX_BUFFERS 64
+#define XAE_MAX_PKT_LEN 8192
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
- * @reserved1: Reserved and not used
+ * @reserved1: Reserved and not used for 32-bit
* @phys: MM2S/S2MM Buffer Address
- * @reserved2: Reserved and not used
+ * @reserved2: Reserved and not used for 32-bit
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -357,14 +506,22 @@
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
* @sw_id_offset: MM2S/S2MM Sw ID
- * @reserved5: Reserved and not used
- * @reserved6: Reserved and not used
+ * @ptp_tx_skb: If timestamping is enabled used for timestamping skb
+ * Otherwise reserved.
+ * @ptp_tx_ts_tag: Tag value of 2 step timestamping if timestamping is enabled
+ * Otherwise reserved.
+ * @tx_skb: Transmit skb address
+ * @tx_desc_mapping: Tx Descriptor DMA mapping type.
*/
struct axidma_bd {
- u32 next; /* Physical address of next buffer descriptor */
+ phys_addr_t next; /* Physical address of next buffer descriptor */
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
u32 reserved1;
- u32 phys;
+#endif
+ phys_addr_t phys;
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
u32 reserved2;
+#endif
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -374,9 +531,90 @@ struct axidma_bd {
u32 app2; /* TX csum seed */
u32 app3;
u32 app4;
- u32 sw_id_offset;
- u32 reserved5;
- u32 reserved6;
+ phys_addr_t sw_id_offset; /* first field not used by h/w */
+ phys_addr_t ptp_tx_skb;
+ u32 ptp_tx_ts_tag;
+ phys_addr_t tx_skb;
+ u32 tx_desc_mapping;
+} __aligned(128);
+
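Both descriptor structs are forced to a 128-byte footprint so the DMA engine's fixed-stride descriptor fetch lines up with the C layout. A standalone sketch of the constraint (the exact stride a given MCDMA configuration requires is an assumption; the struct below is a stand-in, not the real layout):

#include <stdint.h>
#include <assert.h>

struct demo_bd {
        uint64_t next;   /* next descriptor pointer */
        uint64_t phys;   /* buffer address */
        uint32_t cntrl;
        uint32_t status;
        uint32_t app[5]; /* user application words */
} __attribute__((aligned(128)));

/* Padding from aligned(128) grows sizeof to the full stride. */
static_assert(sizeof(struct demo_bd) == 128, "BD must match the DMA stride");
static_assert(_Alignof(struct demo_bd) == 128, "BD must be 128-byte aligned");

int main(void) { return 0; }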
+/**
+ * struct aximcdma_bd - Axi MCDMA buffer descriptor layout
+ * @next: MM2S/S2MM Next Descriptor Pointer
+ * @reserved1: Reserved and not used for 32-bit
+ * @phys: MM2S/S2MM Buffer Address
+ * @reserved2: Reserved and not used for 32-bit
+ * @reserved3: Reserved and not used
+ * @cntrl: MM2S/S2MM Control value
+ * @status: S2MM Status value
+ * @sband_stats: S2MM Sideband Status value
+ * MM2S Status value
+ * @app0: MM2S/S2MM User Application Field 0.
+ * @app1: MM2S/S2MM User Application Field 1.
+ * @app2: MM2S/S2MM User Application Field 2.
+ * @app3: MM2S/S2MM User Application Field 3.
+ * @app4: MM2S/S2MM User Application Field 4.
+ * @sw_id_offset: MM2S/S2MM Sw ID
+ * @ptp_tx_skb: If timestamping is enabled used for timestamping skb
+ * Otherwise reserved.
+ * @ptp_tx_ts_tag: Tag value of 2 step timestamping if timestamping is enabled
+ * Otherwise reserved.
+ * @tx_skb: Transmit skb address
+ * @tx_desc_mapping: Tx Descriptor DMA mapping type.
+ */
+struct aximcdma_bd {
+ phys_addr_t next; /* Physical address of next buffer descriptor */
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved1;
+#endif
+ phys_addr_t phys;
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved2;
+#endif
+ u32 reserved3;
+ u32 cntrl;
+ u32 status;
+ u32 sband_stats;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum seed */
+ u32 app3;
+ u32 app4;
+ phys_addr_t sw_id_offset; /* first field not used by h/w */
+ phys_addr_t ptp_tx_skb;
+ u32 ptp_tx_ts_tag;
+ phys_addr_t tx_skb;
+ u32 tx_desc_mapping;
+} __aligned(128);
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
+#if defined(CONFIG_XILINX_TSN)
+#define XAE_MAX_QUEUES 5
+#elif defined(CONFIG_AXIENET_HAS_MCDMA)
+#define XAE_MAX_QUEUES 16
+#else
+#define XAE_MAX_QUEUES 1
+#endif
+
+#ifdef CONFIG_XILINX_TSN
+/* TSN queue count ranges from 2 to 5, e.g. num_tc = 2 needs a minimum of
+ * 2 queues; num_tc = 3 with sideband signalling needs a maximum of 5 queues.
+ */
+#define XAE_MAX_TSN_TC 3
+#define XAE_TSN_MIN_QUEUES 2
+#endif
+
+enum axienet_tsn_ioctl {
+ SIOCCHIOCTL = SIOCDEVPRIVATE,
+ SIOC_GET_SCHED,
+ SIOC_PREEMPTION_CFG,
+ SIOC_PREEMPTION_CTRL,
+ SIOC_PREEMPTION_STS,
+ SIOC_PREEMPTION_COUNTER,
+ SIOC_QBU_USER_OVERRIDE,
+ SIOC_QBU_STS,
};
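The ioctl enum above numbers the TSN commands upward from SIOCDEVPRIVATE (0x89F0), the base of the 16 codes reserved for device-private ioctls; a standalone sketch of the resulting values:

#include <stdio.h>

#define SIOCDEVPRIVATE 0x89F0 /* as defined in linux/sockios.h */

enum tsn_ioctl {
        SIOCCHIOCTL = SIOCDEVPRIVATE,
        SIOC_GET_SCHED,
        SIOC_PREEMPTION_CFG,
        SIOC_PREEMPTION_CTRL,
        SIOC_PREEMPTION_STS,
        SIOC_PREEMPTION_COUNTER,
        SIOC_QBU_USER_OVERRIDE,
        SIOC_QBU_STS,
};

int main(void)
{
        printf("SIOC_GET_SCHED = 0x%x\n", SIOC_GET_SCHED); /* prints 0x89f1 */
        printf("SIOC_QBU_STS   = 0x%x\n", SIOC_QBU_STS);   /* prints 0x89f7 */
        return 0;
}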
/**
@@ -386,25 +624,31 @@ struct axidma_bd {
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
* @regs: Base address for the axienet_local device address space
- * @dma_regs: Base address for the axidma device address space
- * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
- * @tx_irq: Axidma TX IRQ number
- * @rx_irq: Axidma RX IRQ number
+ * @mcdma_regs: Base address for the aximcdma device address space
+ * @napi: Napi Structure array for all dma queues
+ * @num_tx_queues: Total number of Tx DMA queues
+ * @num_rx_queues: Total number of Rx DMA queues
+ * @dq: DMA queues data
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
+ * @is_tsn: Denotes a TSN port
+ * @temac_no: Denotes the port number in TSN IP
+ * @num_tc: Total number of TSN Traffic classes
+ * @timer_priv: PTP timer private data pointer
+ * @ptp_tx_irq: PTP tx irq
+ * @ptp_rx_irq: PTP rx irq
+ * @rtc_irq: PTP RTC irq
+ * @qbv_irq: QBV scheduler irq
+ * @ptp_ts_type: ptp time stamp type - 1 or 2 step mode
+ * @ptp_rx_hw_pointer: ptp rx hw pointer
+ * @ptp_rx_sw_pointer: ptp rx sw pointer
+ * @ptp_txq: PTP tx queue header
+ * @tx_tstamp_work: PTP timestamping work queue
+ * @ptp_tx_lock: PTP tx lock
+ * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
+ * @eth_irq: Axi Ethernet IRQ number
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
* @features: Stores the extended features supported by the axienet hw
- * @tx_bd_v: Virtual address of the TX buffer descriptor ring
- * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
- * @rx_bd_v: Virtual address of the RX buffer descriptor ring
- * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
- * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while alloc. BDs before a TX starts
- * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while processing BDs after the TX
- * completed.
- * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
- * accessed currently.
* @max_frm_size: Stores the maximum size of the frame that can be that
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
@@ -414,6 +658,28 @@ struct axidma_bd {
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @phy_interface: Phy interface type.
+ * @phy_flags: Phy interface flags.
+ * @eth_hasnobuf: Ethernet is configured in Non buf mode.
+ * @eth_hasptp: Ethernet is configured for ptp.
+ * @axienet_config: Ethernet config structure
+ * @tx_ts_regs: Base address for the axififo device address space.
+ * @rx_ts_regs: Base address for the rx axififo device address space.
+ * @tstamp_config: Hardware timestamp config structure.
+ * @tx_ptpheader: Stores the tx ptp header.
+ * @aclk: AXI4-Lite clock for ethernet and dma.
+ * @eth_sclk: AXI4-Stream interface clock.
+ * @eth_refclk: Stable clock used by signal delay primitives and transceivers.
+ * @eth_dclk: Dynamic Reconfiguration Port(DRP) clock.
+ * @dma_sg_clk: DMA Scatter Gather Clock.
+ * @dma_rx_clk: DMA S2MM Primary Clock.
+ * @dma_tx_clk: DMA MM2S Primary Clock.
+ * @qnum: Axi Ethernet queue number to operate on.
+ * @chan_num: MCDMA channel number to operate on.
+ * @chan_id: MCDMA channel id used in conjunction with the weight parameter.
+ * @weight: MCDMA Channel weight value to be configured for.
+ * @usxgmii_rate: USXGMII PHY speed.
+ * @dma_mask: Specify the width of the DMA address space.
*/
struct axienet_local {
struct net_device *ndev;
@@ -427,35 +693,179 @@ struct axienet_local {
/* IO registers, dma functions and IRQs */
void __iomem *regs;
- void __iomem *dma_regs;
+ void __iomem *mcdma_regs;
- struct tasklet_struct dma_err_tasklet;
+ struct tasklet_struct dma_err_tasklet[XAE_MAX_QUEUES];
+ struct napi_struct napi[XAE_MAX_QUEUES]; /* NAPI Structure */
+
+ #define XAE_TEMAC1 0
+ #define XAE_TEMAC2 1
+ u8 temac_no;
+ u16 num_tx_queues; /* Number of TX DMA queues */
+ u16 num_rx_queues; /* Number of RX DMA queues */
+ struct axienet_dma_q *dq[XAE_MAX_QUEUES]; /* DMA queue data */
- int tx_irq;
- int rx_irq;
phy_interface_t phy_mode;
+ bool is_tsn;
+#ifdef CONFIG_XILINX_TSN
+ u16 num_tc; /* Number of TSN Traffic classes */
+#ifdef CONFIG_XILINX_TSN_PTP
+ void *timer_priv;
+ int ptp_tx_irq;
+ int ptp_rx_irq;
+ int rtc_irq;
+ int qbv_irq;
+ int ptp_ts_type;
+ u8 ptp_rx_hw_pointer;
+ u8 ptp_rx_sw_pointer;
+ struct sk_buff_head ptp_txq;
+ struct work_struct tx_tstamp_work;
+ spinlock_t ptp_tx_lock; /* TSN PTP tx lock*/
+#endif
+#endif
+ int eth_irq;
+
u32 options; /* Current options word */
u32 last_link;
u32 features;
+ u32 max_frm_size;
+ u32 rxmem;
+
+ int csum_offload_on_tx_path;
+ int csum_offload_on_rx_path;
+
+ u32 coalesce_count_rx;
+ u32 coalesce_count_tx;
+ u32 phy_interface;
+ u32 phy_flags;
+ bool eth_hasnobuf;
+ bool eth_hasptp;
+ const struct axienet_config *axienet_config;
+
+#if defined (CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined (CONFIG_XILINX_TSN_PTP)
+ void __iomem *tx_ts_regs;
+ void __iomem *rx_ts_regs;
+ struct hwtstamp_config tstamp_config;
+ u8 *tx_ptpheader;
+#endif
+ struct clk *aclk;
+ struct clk *eth_sclk;
+ struct clk *eth_refclk;
+ struct clk *eth_dclk;
+ struct clk *dma_sg_clk;
+ struct clk *dma_rx_clk;
+ struct clk *dma_tx_clk;
+
+ /* MCDMA Fields */
+ int qnum[XAE_MAX_QUEUES];
+ int chan_num[XAE_MAX_QUEUES];
+ /* WRR Fields */
+ u16 chan_id;
+ u16 weight;
+
+ u32 usxgmii_rate;
+ u8 dma_mask;
+};
+
+/**
+ * struct axienet_dma_q - axienet private per dma queue data
+ * @lp: Parent pointer
+ * @dma_regs: Base address for the axidma device address space
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @tx_lock: Spin lock for tx path
+ * @rx_lock: Spin lock for rx path
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @tx_buf: Virtual address of the Tx buffer pool used by the driver when
+ * DMA h/w is configured without DRE.
+ * @tx_bufs: Virtual address of the Tx buffer area.
+ * @tx_bufs_dma: Physical address of the Tx buffer address used by the driver
+ * when DMA h/w is configured without DRE.
+ * @eth_hasdre: Tells whether DMA h/w is configured with dre or not.
+ * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
+ * accessed currently. Used while alloc. BDs before a TX starts
+ * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
+ * accessed currently. Used while processing BDs after the TX
+ * completed.
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @chan_id: MCDMA channel to operate on.
+ * @rx_offset: MCDMA S2MM channel starting offset.
+ * @txq_bd_v: Virtual address of the MCDMA TX buffer descriptor ring
+ * @rxq_bd_v: Virtual address of the MCDMA RX buffer descriptor ring
+ * @tx_packets: Number of transmit packets processed by the dma queue.
+ * @tx_bytes: Number of transmit bytes processed by the dma queue.
+ * @rx_packets: Number of receive packets processed by the dma queue.
+ * @rx_bytes: Number of receive bytes processed by the dma queue.
+ */
+struct axienet_dma_q {
+ struct axienet_local *lp; /* parent */
+ void __iomem *dma_regs;
+
+ int tx_irq;
+ int rx_irq;
+
+ spinlock_t tx_lock; /* tx lock */
+ spinlock_t rx_lock; /* rx lock */
+
/* Buffer descriptors */
struct axidma_bd *tx_bd_v;
- dma_addr_t tx_bd_p;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ dma_addr_t tx_bd_p;
+
+ unsigned char *tx_buf[XAE_TX_BUFFERS];
+ unsigned char *tx_bufs;
+ dma_addr_t tx_bufs_dma;
+ bool eth_hasdre;
+
u32 tx_bd_ci;
- u32 tx_bd_tail;
u32 rx_bd_ci;
+ u32 tx_bd_tail;
- u32 max_frm_size;
- u32 rxmem;
+ /* MCDMA fields */
+ u16 chan_id;
+ u32 rx_offset;
+ struct aximcdma_bd *txq_bd_v;
+ struct aximcdma_bd *rxq_bd_v;
- int csum_offload_on_tx_path;
- int csum_offload_on_rx_path;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+};
- u32 coalesce_count_rx;
- u32 coalesce_count_tx;
+#define AXIENET_TX_SSTATS_LEN(lp) ((lp)->num_tx_queues * 2)
+#define AXIENET_RX_SSTATS_LEN(lp) ((lp)->num_rx_queues * 2)
+
+/**
+ * enum axienet_ip_type - AXIENET IP/MAC type.
+ *
+ * @XAXIENET_1G: IP is 1G MAC
+ * @XAXIENET_2_5G: IP type is 2.5G MAC.
+ * @XAXIENET_LEGACY_10G: IP type is legacy 10G MAC.
+ * @XAXIENET_10G_25G: IP type is 10G/25G MAC(XXV MAC).
+ *
+ */
+enum axienet_ip_type {
+ XAXIENET_1G = 0,
+ XAXIENET_2_5G,
+ XAXIENET_LEGACY_10G,
+ XAXIENET_10G_25G,
+};
+
+struct axienet_config {
+ enum axienet_ip_type mactype;
+ void (*setoptions)(struct net_device *ndev, u32 options);
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_aclk,
+ struct clk **axis_clk, struct clk **ref_clk,
+ struct clk **dclk);
+ u32 tx_ptplen;
};
/**
@@ -470,6 +880,12 @@ struct axienet_option {
u32 m_or;
};
+struct xxvenet_option {
+ u32 opt;
+ u32 reg;
+ u32 m_or;
+};
+
/**
* axienet_ior - Memory mapped Axi Ethernet register read
* @lp: Pointer to axienet local structure
@@ -504,9 +920,182 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
out_be32((lp->regs + offset), value);
}
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+/**
+ * axienet_txts_ior - Memory mapped AXI FIFO MM S register read
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core
+ *
+ * Return: the contents of the AXI FIFO MM S register
+ */
+
+static inline u32 axienet_txts_ior(struct axienet_local *lp, off_t reg)
+{
+ return in_be32(lp->tx_ts_regs + reg);
+}
+
+/**
+ * axienet_txts_iow - Memory mapper AXI FIFO MM S register write
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core.
+ * @value: Value to be written into the AXI FIFO MM S register
+ */
+static inline void axienet_txts_iow(struct axienet_local *lp, off_t reg,
+ u32 value)
+{
+ out_be32((lp->tx_ts_regs + reg), value);
+}
+
+/**
+ * axienet_rxts_ior - Memory mapped AXI FIFO MM S register read
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core
+ *
+ * Return: the contents of the AXI FIFO MM S register
+ */
+
+static inline u32 axienet_rxts_ior(struct axienet_local *lp, off_t reg)
+{
+ return in_be32(lp->rx_ts_regs + reg);
+}
+
+/**
+ * axienet_rxts_iow - Memory mapper AXI FIFO MM S register write
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core.
+ * @value: Value to be written into the AXI FIFO MM S register
+ */
+static inline void axienet_rxts_iow(struct axienet_local *lp, off_t reg,
+ u32 value)
+{
+ out_be32((lp->rx_ts_regs + reg), value);
+}
+#endif
+
+/**
+ * axienet_dma_in32 - Memory mapped Axi DMA register read
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ *
+ * Return: The contents of the Axi DMA register
+ *
+ * This function returns the contents of the corresponding Axi DMA register.
+ */
+static inline u32 axienet_dma_in32(struct axienet_dma_q *q, off_t reg)
+{
+ return in_be32(q->dma_regs + reg);
+}
+
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out32(struct axienet_dma_q *q,
+ off_t reg, u32 value)
+{
+ out_be32((q->dma_regs + reg), value);
+}
+
+/**
+ * axienet_dma_bdout - Memory mapped Axi DMA register Buffer Descriptor write.
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_bdout(struct axienet_dma_q *q,
+ off_t reg, dma_addr_t value)
+{
+#if defined(CONFIG_PHYS_ADDR_T_64BIT)
+ writeq(value, (q->dma_regs + reg));
+#else
+ writel(value, (q->dma_regs + reg));
+#endif
+}
+
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
int axienet_mdio_wait_until_ready(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
+#ifdef CONFIG_XILINX_TSN_PTP
+void axienet_tx_tstamp(struct work_struct *work);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBV
+int axienet_qbv_init(struct net_device *ndev);
+void axienet_qbv_remove(struct net_device *ndev);
+int axienet_set_schedule(struct net_device *ndev, void __user *useraddr);
+int axienet_get_schedule(struct net_device *ndev, void __user *useraddr);
+#endif
+
+#ifdef CONFIG_XILINX_TSN_QBR
+int axienet_preemption(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_ctrl(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_sts(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_cnt(struct net_device *ndev, void __user *useraddr);
+#ifdef CONFIG_XILINX_TSN_QBV
+int axienet_qbu_user_override(struct net_device *ndev, void __user *useraddr);
+int axienet_qbu_sts(struct net_device *ndev, void __user *useraddr);
+#endif
+#endif
+
+void __maybe_unused axienet_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void axienet_dma_err_handler(unsigned long data);
+irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev);
+irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev);
+void axienet_start_xmit_done(struct net_device *ndev, struct axienet_dma_q *q);
+void axienet_dma_bd_release(struct net_device *ndev);
+void __axienet_device_reset(struct axienet_dma_q *q, off_t offset);
+void axienet_set_mac_address(struct net_device *ndev, const void *address);
+void axienet_set_multicast_list(struct net_device *ndev);
+int xaxienet_rx_poll(struct napi_struct *napi, int quota);
+
+#if defined(CONFIG_AXIENET_HAS_MCDMA)
+int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev);
+irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev);
+void __maybe_unused axienet_mcdma_err_handler(unsigned long data);
+void axienet_strings(struct net_device *ndev, u32 sset, u8 *data);
+int axienet_sset_count(struct net_device *ndev, int sset);
+void axienet_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ u64 *data);
+int axeinet_mcdma_create_sysfs(struct kobject *kobj);
+void axeinet_mcdma_remove_sysfs(struct kobject *kobj);
+int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
+ struct device_node *np,
+ struct axienet_local *lp);
+int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev);
+#endif
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct aximcdma_bd *cur_p);
+#else
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct axidma_bd *cur_p);
+#endif
#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c b/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c
new file mode 100644
index 000000000000..c5443fe005c4
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Xilinx AXI Ethernet (DMA programming)
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc.
+ * Copyright (C) 2018 Xilinx, Inc. All rights reserved.
+ *
+ * This file contains helper functions for AXI DMA TX and RX programming.
+ */
+
+#include "xilinx_axienet.h"
+
+/**
+ * axienet_bd_free - Release buffer descriptor rings for individual dma queue
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This is a helper function for axienet_dma_bd_release.
+ */
+
+void __maybe_unused axienet_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ dma_unmap_single(ndev->dev.parent, q->rx_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (q->rx_bd_v[i].sw_id_offset));
+ }
+
+ if (q->rx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * RX_BD_NUM,
+ q->rx_bd_v,
+ q->rx_bd_p);
+ }
+ if (q->tx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * TX_BD_NUM,
+ q->tx_bd_v,
+ q->tx_bd_p);
+ }
+ if (q->tx_bufs) {
+ dma_free_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * TX_BD_NUM,
+ q->tx_bufs,
+ q->tx_bufs_dma);
+ }
+}
+
+/**
+ * __dma_txq_init - Setup buffer descriptor rings for individual Axi DMA-Tx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success; -ENOMEM on failure
+ *
+ * This is a helper function for axienet_dma_q_init.
+ */
+static int __dma_txq_init(struct net_device *ndev, struct axienet_dma_q *q)
+{
+ int i;
+ u32 cr;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+
+ q->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * TX_BD_NUM,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->tx_bd_v)
+ goto out;
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ q->tx_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->tx_bd_v) *
+ ((i + 1) % TX_BD_NUM);
+ }
+
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * TX_BD_NUM,
+ &q->tx_bufs_dma,
+ GFP_KERNEL);
+ if (!q->tx_bufs)
+ goto out;
+
+ for (i = 0; i < TX_BD_NUM; i++)
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
+ }
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ return 0;
+out:
+ return -ENOMEM;
+}
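The control-register programming above follows a fixed read-modify-write pattern: clear the coalesce and delay fields, insert the new counts, then OR in the interrupt enables. A standalone sketch of just the bit arithmetic; the mask and shift values mimic the XAXIDMA_* layout but are stated here as assumptions (the authoritative definitions live in xilinx_axienet.h).

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: coalesce count in bits 23:16, delay timer in bits
 * 31:24, interrupt-enable bits elsewhere in the register. */
#define COALESCE_MASK  0x00ff0000u
#define COALESCE_SHIFT 16
#define DELAY_MASK     0xff000000u
#define DELAY_SHIFT    24
#define IRQ_ALL_MASK   0x00007000u

static uint32_t program_cr(uint32_t cr, uint32_t count, uint32_t waitbound)
{
	cr = (cr & ~COALESCE_MASK) | (count << COALESCE_SHIFT); /* irq threshold */
	cr = (cr & ~DELAY_MASK) | (waitbound << DELAY_SHIFT);   /* delay timer */
	return cr | IRQ_ALL_MASK;                               /* enable irqs */
}

int main(void)
{
	printf("cr = 0x%08x\n", program_cr(0x00010001u, 24, 254));
	return 0;
}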
+
+/**
+ * __dma_rxq_init - Setup buffer descriptor rings for individual Axi DMA-Rx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success; -ENOMEM on failure
+ *
+ * This is a helper function for axienet_dma_q_init.
+ */
+static int __dma_rxq_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ u32 cr;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+ /* Reset the indexes which are used for accessing the BDs */
+ q->rx_bd_ci = 0;
+
+ /* Allocate the Rx buffer descriptors. */
+ q->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * RX_BD_NUM,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rx_bd_v)
+ goto out;
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ q->rx_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rx_bd_v) *
+ ((i + 1) % RX_BD_NUM);
+
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ goto out;
+
+ /* Ensure that the skb is completely updated
+ * prior to mapping it for DMA
+ */
+ wmb();
+
+ q->rx_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ q->rx_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ return 0;
+out:
+ return -ENOMEM;
+}
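Both ring setups rely on the same chaining trick: BD i's next field holds the bus address of BD (i + 1) mod ring-size, so the DMA engine walks the ring without software intervention. A sketch of that address arithmetic under an assumed ring base address and descriptor size:

#include <stdio.h>

#define RX_BD_NUM 128

int main(void)
{
	const unsigned long long ring_base = 0x40000000ULL; /* assumed bus address */
	const unsigned int bd_size = 64;                    /* assumed BD size */

	for (unsigned int i = 0; i < RX_BD_NUM; i++) {
		unsigned long long next =
			ring_base + (unsigned long long)bd_size * ((i + 1) % RX_BD_NUM);
		/* Print only the interesting entries: the start and the wrap. */
		if (i < 2 || i == RX_BD_NUM - 1)
			printf("bd[%3u].next = 0x%llx\n", i, next);
	}
	return 0;
}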
+
+/**
+ * axienet_dma_q_init - Setup buffer descriptor rings for individual Axi DMA
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success; -ENOMEM on failure
+ *
+ * This is a helper function for axienet_dma_bd_init.
+ */
+int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ if (__dma_txq_init(ndev, q))
+ goto out;
+
+ if (__dma_rxq_init(ndev, q))
+ goto out;
+
+ return 0;
+out:
+ axienet_dma_bd_release(ndev);
+ return -ENOMEM;
+}
+
+/**
+ * map_dma_q_irq - Map an interrupt number to its DMA queue.
+ * @irq: irq number
+ * @lp: axienet local structure
+ *
+ * Return: DMA queue index on success; -ENODEV if no queue matches.
+ *
+ * This returns the index of the DMA queue on which the interrupt occurred.
+ */
+static int map_dma_q_irq(int irq, struct axienet_local *lp)
+{
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ if (irq == lp->dq[i]->tx_irq || irq == lp->dq[i]->rx_irq)
+ return i;
+ }
+ pr_err("Error mapping DMA irq\n");
+ return -ENODEV;
+}
+
+/**
+ * axienet_tx_irq - Tx done ISR.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED or IRQ_NONE.
+ *
+ * This is the Axi DMA Tx done ISR. It invokes "axienet_start_xmit_done"
+ * to complete the BD processing.
+ */
+irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ axienet_start_xmit_done(lp->ndev, q);
+ goto out;
+ }
+
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->tx_bd_v[q->tx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_rx_irq - Rx ISR.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED or IRQ_NONE.
+ *
+ * This is the Axi DMA Rx ISR. It invokes "axienet_recv" to complete the BD
+ * processing.
+ */
+irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+ napi_schedule(&lp->napi[i]);
+ }
+
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->rx_bd_v[q->rx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
+ * @data: Data passed
+ *
+ * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+void __maybe_unused axienet_dma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i;
+ int mdio_mcreg = 0;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
+ struct net_device *ndev = lp->ndev;
+ struct axidma_bd *cur_p;
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ axienet_mdio_wait_until_ready(lp);
+ /* Disable the MDIO interface until the Axi Ethernet reset has
+ * completed. An Axi Ethernet reset resets the complete core,
+ * including the MDIO interface, so if MDIO is not disabled
+ * when the reset starts, it will be broken afterwards.
+ */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
+ ~XAE_MDIO_MC_MDIOEN_MASK));
+ }
+
+ __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
+ __axienet_device_reset(q, XAXIDMA_RX_CR_OFFSET);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
+ axienet_mdio_wait_until_ready(lp);
+ }
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ cur_p = &q->tx_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ cur_p->tx_skb = 0;
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ cur_p = &q->rx_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting
+ */
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ lp->axienet_config->setoptions(ndev, lp->options);
+}
+
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 65c16772e589..e7ceee629eed 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -21,6 +21,7 @@
* - Add support for extended VLAN support.
*/
+#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -30,34 +31,40 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_net.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/random.h>
+#include <net/sock.h>
+#include <linux/xilinx_phy.h>
+#include <linux/clk.h>
#include "xilinx_axienet.h"
-/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
-
+#ifdef CONFIG_XILINX_TSN_PTP
+#include "xilinx_tsn_ptp.h"
+#include "xilinx_tsn_timer.h"
+#endif
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION "1.00a"
#define AXIENET_REGS_N 32
+#define AXIENET_TS_HEADER_LEN 8
+#define XXVENET_TS_HEADER_LEN 4
+#define NS_PER_SEC 1000000000ULL /* Nanoseconds per second */
-/* Match table for of_platform binding */
-static const struct of_device_id axienet_of_match[] = {
- { .compatible = "xlnx,axi-ethernet-1.00.a", },
- { .compatible = "xlnx,axi-ethernet-1.01.a", },
- { .compatible = "xlnx,axi-ethernet-2.01.a", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, axienet_of_match);
+#ifdef CONFIG_XILINX_TSN_PTP
+int axienet_phc_index = -1;
+EXPORT_SYMBOL(axienet_phc_index);
+#endif
/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
@@ -100,8 +107,8 @@ static struct axienet_option axienet_options[] = {
.m_or = XAE_FCC_FCTX_MASK,
}, { /* Turn on promiscuous frame filtering */
.opt = XAE_OPTION_PROMISC,
- .reg = XAE_FMI_OFFSET,
- .m_or = XAE_FMI_PM_MASK,
+ .reg = XAE_FMC_OFFSET,
+ .m_or = XAE_FMC_PM_MASK,
}, { /* Enable transmitter */
.opt = XAE_OPTION_TXEN,
.reg = XAE_TC_OFFSET,
@@ -114,34 +121,27 @@ static struct axienet_option axienet_options[] = {
{}
};
-/**
- * axienet_dma_in32 - Memory mapped Axi DMA register read
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- *
- * Return: The contents of the Axi DMA register
- *
- * This function returns the contents of the corresponding Axi DMA register.
- */
-static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
-{
- return in_be32(lp->dma_regs + reg);
-}
-
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
-{
- out_be32((lp->dma_regs + reg), value);
-}
+/* Option table for setting up Axi Ethernet hardware options */
+static struct xxvenet_option xxvenet_options[] = {
+ { /* Turn on FCS stripping on receive packets */
+ .opt = XAE_OPTION_FCS_STRIP,
+ .reg = XXV_RCW1_OFFSET,
+ .m_or = XXV_RCW1_FCS_MASK,
+ }, { /* Turn on FCS insertion on transmit packets */
+ .opt = XAE_OPTION_FCS_INSERT,
+ .reg = XXV_TC_OFFSET,
+ .m_or = XXV_TC_FCS_MASK,
+ }, { /* Enable transmitter */
+ .opt = XAE_OPTION_TXEN,
+ .reg = XXV_TC_OFFSET,
+ .m_or = XXV_TC_TX_MASK,
+ }, { /* Enable receiver */
+ .opt = XAE_OPTION_RXEN,
+ .reg = XXV_RCW1_OFFSET,
+ .m_or = XXV_RCW1_RX_MASK,
+ },
+ {}
+};
/**
* axienet_dma_bd_release - Release buffer descriptor rings
@@ -151,29 +151,22 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
* axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
* driver stop api is called.
*/
-static void axienet_dma_bd_release(struct net_device *ndev)
+void axienet_dma_bd_release(struct net_device *ndev)
{
int i;
struct axienet_local *lp = netdev_priv(ndev);
- for (i = 0; i < RX_BD_NUM; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
- dev_kfree_skb((struct sk_buff *)
- (lp->rx_bd_v[i].sw_id_offset));
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ for_each_tx_dma_queue(lp, i) {
+ axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
}
-
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- lp->tx_bd_v,
- lp->tx_bd_p);
+#endif
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
+#else
+ axienet_bd_free(ndev, lp->dq[i]);
+#endif
}
}
@@ -189,101 +182,29 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
- int i;
- struct sk_buff *skb;
+ int i, ret;
struct axienet_local *lp = netdev_priv(ndev);
- /* Reset the indexes which are used for accessing the BDs */
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
- /* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v)
- goto out;
-
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v)
- goto out;
-
- for (i = 0; i < TX_BD_NUM; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % TX_BD_NUM);
- }
-
- for (i = 0; i < RX_BD_NUM; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % RX_BD_NUM);
-
- skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!skb)
- goto out;
-
- lp->rx_bd_v[i].sw_id_offset = (u32) skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
- lp->rx_bd_v[i].cntrl = lp->max_frm_size;
- }
-
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ for_each_tx_dma_queue(lp, i) {
+ ret = axienet_mcdma_tx_q_init(ndev, lp->dq[i]);
+ if (ret != 0)
+ break;
+ }
+#endif
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ ret = axienet_mcdma_rx_q_init(ndev, lp->dq[i]);
+#else
+ ret = axienet_dma_q_init(ndev, lp->dq[i]);
+#endif
+ if (ret != 0) {
+ netdev_err(ndev, "%s: Failed to init DMA buf\n", __func__);
+ break;
+ }
+ }
- return 0;
-out:
- axienet_dma_bd_release(ndev);
- return -ENOMEM;
+ return ret;
}
/**
@@ -294,16 +215,19 @@ out:
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It writes to the UAW0 and UAW1 registers of the core.
*/
-static void axienet_set_mac_address(struct net_device *ndev,
- const void *address)
+void axienet_set_mac_address(struct net_device *ndev, const void *address)
{
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
+ if (lp->axienet_config->mactype != XAXIENET_1G &&
+ lp->axienet_config->mactype != XAXIENET_2_5G)
+ return;
+
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
@@ -331,6 +255,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
+
axienet_set_mac_address(ndev, addr->sa_data);
return 0;
}
@@ -346,12 +271,15 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
* means whenever the multicast table entries need to be updated this
* function gets called.
*/
-static void axienet_set_multicast_list(struct net_device *ndev)
+void axienet_set_multicast_list(struct net_device *ndev)
{
int i;
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
+ if ((lp->axienet_config->mactype != XAXIENET_1G) || lp->eth_hasnobuf)
+ return;
+
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
@@ -359,9 +287,9 @@ static void axienet_set_multicast_list(struct net_device *ndev)
* the flag is already set. If not we set it.
*/
ndev->flags |= IFF_PROMISC;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg |= XAE_FMI_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ reg = axienet_ior(lp, XAE_FMC_OFFSET);
+ reg |= XAE_FMC_PM_MASK;
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -379,25 +307,25 @@ static void axienet_set_multicast_list(struct net_device *ndev)
af1reg = (ha->addr[4]);
af1reg |= (ha->addr[5] << 8);
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET) & 0xFFFFFF00;
reg |= i;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
i++;
}
} else {
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET);
+ reg &= ~XAE_FMC_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET) & 0xFFFFFF00;
reg |= i;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, 0);
axienet_iow(lp, XAE_AF1_OFFSET, 0);
}
@@ -434,7 +362,24 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static void xxvenet_setoptions(struct net_device *ndev, u32 options)
+{
+ int reg;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct xxvenet_option *tp = &xxvenet_options[0];
+
+ while (tp->opt) {
+ reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
+ if (options & tp->opt)
+ reg |= tp->m_or;
+ axienet_iow(lp, tp->reg, reg);
+ tp++;
+ }
+
+ lp->options |= options;
+}
+
+void __axienet_device_reset(struct axienet_dma_q *q, off_t offset)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
@@ -442,12 +387,12 @@ static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
* commands/transfers will be flushed or completed during this
* reset process.
*/
- axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(q, offset, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+ while (axienet_dma_in32(q, offset) & XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ netdev_err(q->lp->ndev, "%s: DMA reset timeout!\n",
__func__);
break;
}
@@ -469,46 +414,103 @@ static void axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
+ u32 err, val;
+ struct axienet_dma_q *q;
+ u32 i;
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Reset the XXV MAC */
+ val = axienet_ior(lp, XXV_GT_RESET_OFFSET);
+ val |= XXV_GT_RESET_MASK;
+ axienet_iow(lp, XXV_GT_RESET_OFFSET, val);
+ /* Wait for 1ms for GT reset to complete as per spec */
+ mdelay(1);
+ val = axienet_ior(lp, XXV_GT_RESET_OFFSET);
+ val &= ~XXV_GT_RESET_MASK;
+ axienet_iow(lp, XXV_GT_RESET_OFFSET, val);
+ }
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
+#ifndef CONFIG_AXIENET_HAS_MCDMA
+ __axienet_device_reset(q, XAXIDMA_RX_CR_OFFSET);
+#endif
+ }
+ }
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
- lp->options |= XAE_OPTION_VLAN;
- lp->options &= (~XAE_OPTION_JUMBO);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_VLAN;
+ lp->options &= (~XAE_OPTION_JUMBO);
+ }
if ((ndev->mtu > XAE_MTU) &&
(ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
-
- if (lp->max_frm_size <= lp->rxmem)
+ if (lp->max_frm_size <= lp->rxmem &&
+ (lp->axienet_config->mactype != XAXIENET_10G_25G))
lp->options |= XAE_OPTION_JUMBO;
}
- if (axienet_dma_bd_init(ndev)) {
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ if (axienet_dma_bd_init(ndev)) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
+ }
}
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Check whether the block lock bit got set.
+ * This confirms that the 10G Ethernet IP
+ * is functioning normally.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ val, (val & XXV_RX_BLKLCK_MASK),
+ 10, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "XXV MAC block lock not complete! Cross-check the MAC ref clock configuration\n");
+ }
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_RDFR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_SRR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ }
+#endif
+ }
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ if ((lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+
+ /* Enable receive error interrupts */
+ axienet_iow(lp, XAE_IE_OFFSET, XAE_INT_RECV_ERROR_MASK);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_FCS_STRIP;
+ lp->options |= XAE_OPTION_FCS_INSERT;
+ } else {
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ }
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
- */
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ lp->axienet_config->setoptions(ndev, lp->options);
netif_trans_update(ndev);
}
@@ -545,6 +547,9 @@ static void axienet_adjust_link(struct net_device *ndev)
emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
switch (phy->speed) {
+ case SPEED_2500:
+ emmc_reg |= XAE_EMMC_LINKSPD_2500;
+ break;
case SPEED_1000:
emmc_reg |= XAE_EMMC_LINKSPD_1000;
break;
@@ -555,25 +560,151 @@ static void axienet_adjust_link(struct net_device *ndev)
emmc_reg |= XAE_EMMC_LINKSPD_10;
break;
default:
- dev_err(&ndev->dev, "Speed other than 10, 100 "
- "or 1Gbps is not supported\n");
+ dev_err(&ndev->dev,
+ "Speed other than 10, 100 or 1Gbps is not supported\n");
break;
}
axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
- lp->last_link = link_state;
phy_print_status(phy);
} else {
netdev_err(ndev,
"Error setting Axi Ethernet mac speed\n");
}
+
+ lp->last_link = link_state;
+ }
+}
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+/**
+ * axienet_tx_hwtstamp - Read the Tx timestamp from hardware and attach it to the skbuff
+ * @lp: Pointer to axienet local structure
+ * @cur_p: Pointer to the axi_dma/axi_mcdma current bd
+ *
+ * Return: None.
+ */
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct aximcdma_bd *cur_p)
+#else
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct axidma_bd *cur_p)
+#endif
+{
+ u32 sec = 0, nsec = 0, val;
+ u64 time64;
+ int err = 0;
+ u32 count, len = lp->axienet_config->tx_ptplen;
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps((struct sk_buff *)cur_p->ptp_tx_skb);
+
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_ISR);
+ if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK)))
+ dev_info(lp->dev, "Didn't get FIFO rx interrupt %d\n", val);
+
+ /* In cut-through mode the Rx complete interrupt can fire with only
+ * one byte in the FIFO, so wait for the full timestamp packet.
+ */
+ err = readl_poll_timeout_atomic(lp->tx_ts_regs + XAXIFIFO_TXTS_RLR, val,
+ ((val & XAXIFIFO_TXTS_RXFD_MASK) >=
+ len), 0, 1000000);
+ if (err)
+ netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet\n",
+ __func__);
+
+ nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = ((val & XAXIFIFO_TXTS_TAG_MASK) >> XAXIFIFO_TXTS_TAG_SHIFT);
+ if (val != cur_p->ptp_tx_ts_tag) {
+ count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
+ while (count) {
+ nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = ((val & XAXIFIFO_TXTS_TAG_MASK) >>
+ XAXIFIFO_TXTS_TAG_SHIFT);
+ if (val == cur_p->ptp_tx_ts_tag)
+ break;
+ count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
+ }
+ if (val != cur_p->ptp_tx_ts_tag) {
+ dev_info(lp->dev, "Mismatching 2-step tag. Got %x",
+ val);
+ dev_info(lp->dev, "Expected %x\n",
+ cur_p->ptp_tx_ts_tag);
+ }
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+
+ time64 = sec * NS_PER_SEC + nsec;
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ skb_pull((struct sk_buff *)cur_p->ptp_tx_skb,
+ AXIENET_TS_HEADER_LEN);
+
+ skb_tstamp_tx((struct sk_buff *)cur_p->ptp_tx_skb, shhwtstamps);
+ dev_kfree_skb_any((struct sk_buff *)cur_p->ptp_tx_skb);
+ cur_p->ptp_tx_skb = 0;
+}
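The FIFO hands back the timestamp as separate seconds and nanoseconds words; the driver folds them into one 64-bit nanosecond count before handing it to skb_tstamp_tx. The arithmetic in isolation (sample values are made up):

#include <stdint.h>
#include <stdio.h>

#define NS_PER_SEC 1000000000ULL /* matches the driver's definition */

int main(void)
{
	uint32_t sec = 1700000000u, nsec = 123456789u; /* sample values */

	/* NS_PER_SEC is unsigned long long, so the multiply is done in
	 * 64 bits and the 32-bit seconds word cannot overflow it. */
	uint64_t time64 = sec * NS_PER_SEC + nsec;

	printf("hwtstamp = %llu ns\n", (unsigned long long)time64);
	return 0;
}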
+
+/**
+ * axienet_rx_hwtstamp - Read the Rx timestamp from hardware and attach it to the skbuff
+ * @lp: Pointer to axienet local structure
+ * @skb: Pointer to the sk_buff structure
+ *
+ * Return: None.
+ */
+static void axienet_rx_hwtstamp(struct axienet_local *lp,
+ struct sk_buff *skb)
+{
+ u32 sec = 0, nsec = 0, val;
+ u64 time64;
+ int err = 0;
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_ISR);
+ if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK))) {
+ dev_info(lp->dev, "Didn't get FIFO rx interrupt %d\n", val);
+ return;
+ }
+
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RFO);
+ if (!val)
+ return;
+
+ /* In cut-through mode the Rx complete interrupt can fire with only
+ * one byte in the FIFO, so wait for the full timestamp packet.
+ */
+ err = readl_poll_timeout_atomic(lp->rx_ts_regs + XAXIFIFO_TXTS_RLR, val,
+ ((val & XAXIFIFO_TXTS_RXFD_MASK) >= 12),
+ 0, 1000000);
+ if (err) {
+ netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet\n",
+ __func__);
+ return;
+ }
+
+ nsec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+
+ if (lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ time64 = sec * NS_PER_SEC + nsec;
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
}
}
+#endif
/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
*
* This function is invoked from the Axi DMA Tx isr to notify the completion
* of transmit operation. It clears fields in the corresponding Tx BDs and
@@ -581,50 +712,84 @@ static void axienet_adjust_link(struct net_device *ndev)
* buffer. It finally invokes "netif_wake_queue" to restart transmission if
* required.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+void axienet_start_xmit_done(struct net_device *ndev, struct axienet_dma_q *q)
{
u32 size = 0;
u32 packets = 0;
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
struct axienet_local *lp = netdev_priv(ndev);
+#endif
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
unsigned int status = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_ci];
+ status = cur_p->sband_stats;
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
+#endif
while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (cur_p->ptp_tx_skb)
+ axienet_tx_hwtstamp(lp, cur_p);
+#endif
+ if (cur_p->tx_desc_mapping == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(ndev->dev.parent, cur_p->phys,
+ cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK,
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
+ cur_p->tx_skb = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->sband_stats = 0;
+#endif
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- ++lp->tx_bd_ci;
- lp->tx_bd_ci %= TX_BD_NUM;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+ ++q->tx_bd_ci;
+ q->tx_bd_ci %= TX_BD_NUM;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_ci];
+ status = cur_p->sband_stats;
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
+#endif
}
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
-
- /* Matches barrier in axienet_start_xmit */
- smp_mb();
-
- netif_wake_queue(ndev);
+ q->tx_packets += packets;
+ q->tx_bytes += size;
+ /* FIXME: with the existing multiqueue implementation
+ * in the driver it is difficult to get the exact queue info,
+ * so all ndev queues are woken here. Ideally only the
+ * particular queue would be woken.
+ */
+ netif_tx_wake_all_queues(ndev);
}
/**
* axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
+ * @q: Pointer to DMA queue structure
* @num_frag: The number of BDs to check for
*
* Return: 0, on success
@@ -635,61 +800,285 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* transmission. If the BD or any of the BDs are not free the function
* returns a busy status. This is invoked from axienet_start_xmit.
*/
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+static inline int axienet_check_tx_bd_space(struct axienet_dma_q *q,
int num_frag)
{
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+
+ if (CIRC_SPACE(q->tx_bd_tail, q->tx_bd_ci, TX_BD_NUM) < (num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ cur_p = &q->txq_bd_v[(q->tx_bd_tail + num_frag) % TX_BD_NUM];
+ if (cur_p->sband_stats & XMCDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+#else
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+
+ if (CIRC_SPACE(q->tx_bd_tail, q->tx_bd_ci, TX_BD_NUM) < (num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ cur_p = &q->tx_bd_v[(q->tx_bd_tail + num_frag) % TX_BD_NUM];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
+#endif
return 0;
}
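The free-space test uses CIRC_SPACE from <linux/circ_buf.h> (included at the top of this patch): with a power-of-two ring size it computes free slots from the producer (tx_bd_tail) and consumer (tx_bd_ci) indices, and a frame with num_frag fragments needs num_frag + 1 descriptors. A standalone sketch, with the macro reproduced here only for illustration:

#include <stdio.h>

/* CIRC_SPACE() as in <linux/circ_buf.h>: free slots in a ring of
 * power-of-two size, given producer (head) and consumer (tail). */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))
#define TX_BD_NUM 64

int main(void)
{
	int head = 10, tail = 5; /* 5 BDs in flight */
	int num_frag = 3;        /* a frame needs num_frag + 1 BDs */

	if (CIRC_SPACE(head, tail, TX_BD_NUM) < num_frag + 1)
		printf("ring full: would return NETDEV_TX_BUSY\n");
	else
		printf("space for %d BDs (%d free)\n", num_frag + 1,
		       CIRC_SPACE(head, tail, TX_BD_NUM));
	return 0;
}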
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
/**
- * axienet_start_xmit - Starts the transmission.
- * @skb: sk_buff pointer that contains data to be Txed.
- * @ndev: Pointer to net_device structure.
+ * axienet_create_tsheader - Create timestamp header for tx
+ * @buf: Pointer to the buffer to copy the timestamp header into
+ * @msg_type: PTP message type
+ * @q: Pointer to DMA queue structure
*
- * Return: NETDEV_TX_OK, on success
- * NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked from upper layers to initiate transmission. The
- * function uses the next available free BDs and populates their fields to
- * start the transmission. Additionally if checksum offloading is supported,
- * it populates AXI Stream Control fields with appropriate values.
+ * Return: None.
*/
-static netdev_tx_t
-axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void axienet_create_tsheader(u8 *buf, u8 msg_type,
+ struct axienet_dma_q *q)
+{
+ struct axienet_local *lp = q->lp;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
+ struct axidma_bd *cur_p;
+#endif
+ u64 val;
+ u32 tmp;
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ if (msg_type == TX_TS_OP_ONESTEP) {
+ buf[0] = TX_TS_OP_ONESTEP;
+ buf[1] = TX_TS_CSUM_UPDATE;
+ buf[4] = TX_PTP_TS_OFFSET;
+ buf[6] = TX_PTP_CSUM_OFFSET;
+ } else {
+ buf[0] = TX_TS_OP_TWOSTEP;
+ buf[2] = cur_p->ptp_tx_ts_tag & 0xFF;
+ buf[3] = (cur_p->ptp_tx_ts_tag >> 8) & 0xFF;
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G ||
+ lp->axienet_config->mactype == XAXIENET_2_5G) {
+ memcpy(&val, buf, AXIENET_TS_HEADER_LEN);
+ swab64s(&val);
+ memcpy(buf, &val, AXIENET_TS_HEADER_LEN);
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ memcpy(&tmp, buf, XXVENET_TS_HEADER_LEN);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_TXFD, tmp);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_TLR, XXVENET_TS_HEADER_LEN);
+ }
+}
+#endif
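For the 1G/2.5G MACs the 8-byte timestamp command header is treated as a single 64-bit word and byte-swapped (swab64s) before use. A sketch of just that swap; the opcode and tag bytes below are placeholders, not the driver's TX_TS_* values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t buf[8] = { 0x01, 0x02, 0xaa, 0xbb, 0, 0, 0, 0 }; /* placeholder header */
	uint64_t val;

	/* Same memcpy / swab / memcpy sequence as the 1G path above;
	 * __builtin_bswap64 stands in for the kernel's swab64s(). */
	memcpy(&val, buf, sizeof(val));
	val = __builtin_bswap64(val);
	memcpy(buf, &val, sizeof(val));

	for (int i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}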
+
+#ifdef CONFIG_XILINX_TSN
+static inline u16 get_tsn_queue(u8 pcp, u16 num_tc)
+{
+ u16 queue = 0;
+
+ /* For a 3-queue system, the RE queue is 1 and the ST queue is 2.
+ * For a 2-queue system, the ST queue is 1. The BE queue is always 0.
+ */
+ if (pcp == 4) {
+ if (num_tc == 2)
+ queue = 1;
+ else
+ queue = 2;
+ } else if ((num_tc == 3) && (pcp == 2 || pcp == 3)) {
+ queue = 1;
+ }
+
+ return queue;
+}
+
+static inline u16 tsn_queue_mapping(const struct sk_buff *skb, u16 num_tc)
+{
+ int queue = 0;
+ u16 vlan_tci;
+ u8 pcp;
+
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ if (unlikely(ether_type == ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)skb->data;
+
+ /* ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); */
+
+ vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ pr_debug("vlan_tci: %x\n", vlan_tci);
+ pr_debug("pcp: %d\n", pcp);
+
+ queue = get_tsn_queue(pcp, num_tc);
+ }
+ pr_debug("selected queue: %d\n", queue);
+ return queue;
+}
+#endif
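The PCP-to-queue policy above is small enough to tabulate: best-effort traffic always lands on queue 0; PCP 4 selects the ST queue (1 in a two-queue design, 2 otherwise); PCP 2 or 3 selects the RE queue (1) only when three traffic classes exist. A standalone restatement of get_tsn_queue with a few probes:

#include <stdint.h>
#include <stdio.h>

/* Mirrors get_tsn_queue() above for illustration only. */
static uint16_t get_tsn_queue(uint8_t pcp, uint16_t num_tc)
{
	if (pcp == 4)
		return (num_tc == 2) ? 1 : 2;
	if (num_tc == 3 && (pcp == 2 || pcp == 3))
		return 1;
	return 0;
}

int main(void)
{
	printf("pcp 4, 3 TCs -> queue %u (ST)\n", get_tsn_queue(4, 3));
	printf("pcp 3, 3 TCs -> queue %u (RE)\n", get_tsn_queue(3, 3));
	printf("pcp 0, 3 TCs -> queue %u (BE)\n", get_tsn_queue(0, 3));
	return 0;
}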
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+static int axienet_skb_tstsmp(struct sk_buff **__skb, struct axienet_dma_q *q,
+ struct net_device *ndev)
+{
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
+ struct axidma_bd *cur_p;
+#endif
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct sk_buff *old_skb = *__skb;
+ struct sk_buff *skb = *__skb;
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ if (!lp->is_tsn) {
+ if ((((lp->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_SYNC) ||
+ (lp->tstamp_config.tx_type == HWTSTAMP_TX_ON)) ||
+ lp->eth_hasptp) && (lp->axienet_config->mactype !=
+ XAXIENET_10G_25G)) {
+ u8 *tmp;
+ struct sk_buff *new_skb;
+
+ if (skb_headroom(old_skb) < AXIENET_TS_HEADER_LEN) {
+ new_skb =
+ skb_realloc_headroom(old_skb,
+ AXIENET_TS_HEADER_LEN);
+ if (!new_skb) {
+ dev_err(&ndev->dev, "failed to allocate new socket buffer\n");
+ dev_kfree_skb_any(old_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Transfer the ownership to the
+ * new socket buffer if required
+ */
+ if (old_skb->sk)
+ skb_set_owner_w(new_skb, old_skb->sk);
+ dev_kfree_skb_any(old_skb);
+ *__skb = new_skb;
+ skb = new_skb;
+ }
+
+ tmp = skb_push(skb, AXIENET_TS_HEADER_LEN);
+ memset(tmp, 0, AXIENET_TS_HEADER_LEN);
+ cur_p->ptp_tx_ts_tag++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (lp->tstamp_config.tx_type ==
+ HWTSTAMP_TX_ONESTEP_SYNC) {
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_ONESTEP, q);
+ } else {
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_TWOSTEP, q);
+ skb_shinfo(skb)->tx_flags
+ |= SKBTX_IN_PROGRESS;
+ cur_p->ptp_tx_skb =
+ (unsigned long)skb_get(skb);
+ }
+ }
+ } else if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (lp->axienet_config->mactype == XAXIENET_10G_25G)) {
+ cur_p->ptp_tx_ts_tag = (prandom_u32() &
+ ~XAXIFIFO_TXTS_TAG_MASK) + 1;
+ if (lp->tstamp_config.tx_type ==
+ HWTSTAMP_TX_ONESTEP_SYNC) {
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_ONESTEP, q);
+ } else {
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_TWOSTEP, q);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cur_p->ptp_tx_skb = (phys_addr_t)skb_get(skb);
+ }
+ }
+ }
+ return NETDEV_TX_OK;
+}
+#endif
+
+static int axienet_queue_xmit(struct sk_buff *skb,
+ struct net_device *ndev, u16 map)
{
u32 ii;
u32 num_frag;
u32 csum_start_off;
u32 csum_index_off;
- skb_frag_t *frag;
dma_addr_t tail_p;
struct axienet_local *lp = netdev_priv(ndev);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
-
+#endif
+ unsigned long flags;
+ u32 pad = 0;
+ struct axienet_dma_q *q;
+
+#ifdef CONFIG_XILINX_TSN
+ if (unlikely(lp->is_tsn)) {
+ map = tsn_queue_mapping(skb, lp->num_tc);
+#ifdef CONFIG_XILINX_TSN_PTP
+ const struct ethhdr *eth;
+
+ eth = (struct ethhdr *)skb->data;
+ /* check if skb is a PTP frame ? */
+ if (eth->h_proto == htons(ETH_P_1588))
+ return axienet_ptp_xmit(skb, ndev);
+#endif
+ if (lp->temac_no == XAE_TEMAC2) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
+ q = lp->dq[map];
- netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
+ spin_lock_irqsave(&q->tx_lock, flags);
+ if (axienet_check_tx_bd_space(q, num_frag)) {
+ if (!__netif_subqueue_stopped(ndev, map))
+ netif_stop_subqueue(ndev, map);
+ spin_unlock_irqrestore(&q->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
- netif_wake_queue(ndev);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (axienet_skb_tstsmp(&skb, q, ndev)) {
+ spin_unlock_irqrestore(&q->tx_lock, flags);
+ return NETDEV_TX_BUSY;
}
+#endif
+ /* Workaround for the XXV MAC: the MAC drops packets shorter
+ * than 64 bytes, so append padding to bring the packet length
+ * up to at least 64 bytes.
+ */
+ if (skb->len < XXV_MAC_MIN_PKT_LEN &&
+ (lp->axienet_config->mactype == XAXIENET_10G_25G))
+ pad = XXV_MAC_MIN_PKT_LEN - skb->len;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
/* Tx Full Checksum Offload Enabled */
cur_p->app0 |= 2;
@@ -700,48 +1089,131 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 1;
cur_p->app1 = (csum_start_off << 16) | csum_index_off;
}
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY &&
+ !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ if (num_frag == 0) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = (skb_headlen(skb) |
+ XMCDMA_BD_CTRL_TXSOF_MASK) + pad;
+#else
+ cur_p->cntrl = (skb_headlen(skb) |
+ XAXIDMA_BD_CTRL_TXSOF_MASK) + pad;
+#endif
+ } else {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = (skb_headlen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK);
+#else
+ cur_p->cntrl = (skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK);
+#endif
+ }
+
+ if (!q->eth_hasdre &&
+ (((phys_addr_t)skb->data & 0x3) || (num_frag > 0))) {
+ skb_copy_and_csum_dev(skb, q->tx_buf[q->tx_bd_tail]);
+
+ cur_p->phys = q->tx_bufs_dma +
+ (q->tx_buf[q->tx_bd_tail] - q->tx_bufs);
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = skb_pagelen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK;
+#else
+ cur_p->cntrl = skb_pagelen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+#endif
+ goto out;
+ } else {
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ }
+ cur_p->tx_desc_mapping = DESC_DMA_MAP_SINGLE;
for (ii = 0; ii < num_frag; ii++) {
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ u32 len;
+ skb_frag_t *frag;
+
+ ++q->tx_bd_tail;
+ q->tx_bd_tail %= TX_BD_NUM;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- cur_p->cntrl = skb_frag_size(frag);
+ len = skb_frag_size(frag);
+ cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, len,
+ DMA_TO_DEVICE);
+ cur_p->cntrl = len;
+ /* Only add padding to the end of the last element */
+ if ((ii + 1) == num_frag)
+ cur_p->cntrl = len + pad;
+ cur_p->tx_desc_mapping = DESC_DMA_MAP_PAGE;
}
+out:
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl |= XMCDMA_BD_CTRL_TXEOF_MASK;
+ tail_p = q->tx_bd_p + sizeof(*q->txq_bd_v) * q->tx_bd_tail;
+#else
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
+ tail_p = q->tx_bd_p + sizeof(*q->tx_bd_v) * q->tx_bd_tail;
+#endif
+ cur_p->tx_skb = (phys_addr_t)skb;
+
+ /* Ensure BD write before starting transfer */
+ wmb();
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id),
+ tail_p);
+#else
+ axienet_dma_bdout(q, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+#endif
+ ++q->tx_bd_tail;
+ q->tx_bd_tail %= TX_BD_NUM;
+
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_OK;
}
/**
+ * axienet_start_xmit - Starts the transmission.
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked from upper layers to initiate transmission. The
+ * function uses the next available free BDs and populates their fields to
+ * start the transmission. Additionally if checksum offloading is supported,
+ * it populates AXI Stream Control fields with appropriate values.
+ */
+static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	u16 map = skb_get_queue_mapping(skb); /* Single DMA queue is the default */
+
+ return axienet_queue_xmit(skb, ndev, map);
+}
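The queue index returned by skb_get_queue_mapping() is only meaningful because axienet_probe() below allocates the netdev with alloc_etherdev_mq(). When fewer usable queues are discovered at probe time than were allocated, the usual pattern is to trim the advertised counts; a short sketch, where found_q is a hypothetical count discovered during probe:

/* Sketch: advertise the number of usable queues to the networking core. */
#include <linux/netdevice.h>

static int example_set_queue_counts(struct net_device *ndev,
				    unsigned int found_q)
{
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, found_q);
	if (ret)
		return ret;

	return netif_set_real_num_rx_queues(ndev, found_q);
}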
+
+/**
* axienet_recv - Is called from Axi DMA Rx Isr to complete the received
* BD processing.
* @ndev: Pointer to net_device structure.
+ * @budget: NAPI budget
+ * @q: Pointer to axienet DMA queue structure
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * This function is invoked from the Axi DMA Rx ISR (NAPI poll) to process
+ * the Rx BDs. It does minimal processing and invokes "netif_receive_skb" to
+ * complete further processing.
+ * Return: Number of BDs processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_recv(struct net_device *ndev, int budget,
+ struct axienet_dma_q *q)
{
u32 length;
u32 csumstatus;
@@ -750,26 +1222,82 @@ static void axienet_recv(struct net_device *ndev)
dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
+ unsigned int numbdfree = 0;
+
+	/* Get relevant BD status value */
+ rmb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->rxq_bd_v[q->rx_bd_ci];
+#else
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
+#endif
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ while ((numbdfree < budget) &&
+ (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ tail_p = q->rx_bd_p + sizeof(*q->rxq_bd_v) * q->rx_bd_ci;
+#else
+ tail_p = q->rx_bd_p + sizeof(*q->rx_bd_v) * q->rx_bd_ci;
+#endif
+ skb = (struct sk_buff *)(cur_p->sw_id_offset);
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- skb = (struct sk_buff *) (cur_p->sw_id_offset);
- length = cur_p->app4 & 0x0000FFFF;
+ if (lp->eth_hasnobuf ||
+ (lp->axienet_config->mactype != XAXIENET_1G))
+ length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ else
+ length = cur_p->app4 & 0x0000FFFF;
dma_unmap_single(ndev->dev.parent, cur_p->phys,
lp->max_frm_size,
DMA_FROM_DEVICE);
skb_put(skb, length);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ if ((lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL ||
+ lp->eth_hasptp) && (lp->axienet_config->mactype !=
+ XAXIENET_10G_25G)) {
+ u32 sec, nsec;
+ u64 time64;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ if (lp->axienet_config->mactype == XAXIENET_1G ||
+ lp->axienet_config->mactype == XAXIENET_2_5G) {
+ /* The first 8 bytes will be the timestamp */
+ memcpy(&sec, &skb->data[0], 4);
+ memcpy(&nsec, &skb->data[4], 4);
+
+ sec = cpu_to_be32(sec);
+ nsec = cpu_to_be32(nsec);
+ } else {
+ /* The first 8 bytes will be the timestamp */
+ memcpy(&nsec, &skb->data[0], 4);
+ memcpy(&sec, &skb->data[4], 4);
+ }
+
+ /* Remove these 8 bytes from the buffer */
+ skb_pull(skb, 8);
+ time64 = sec * NS_PER_SEC + nsec;
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ axienet_rx_hwtstamp(lp, skb);
+ }
+ }
+#endif
skb->protocol = eth_type_trans(skb, ndev);
/*skb_checksum_none_assert(skb);*/
skb->ip_summed = CHECKSUM_NONE;
/* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM &&
+ (lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
csumstatus = (cur_p->app2 &
XAE_FULL_CSUM_STATUS_MASK) >> 3;
if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
@@ -778,138 +1306,195 @@ static void axienet_recv(struct net_device *ndev)
}
} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ skb->len > 64 && !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
- netif_rx(skb);
+ netif_receive_skb(skb);
size += length;
packets++;
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!new_skb)
- return;
+ new_skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+		if (!new_skb) {
+			dev_err(lp->dev, "No memory for new_skb\n");
+ break;
+ }
+
+		/* Ensure that the skb is completely updated
+		 * prior to mapping it for DMA
+		 */
+ wmb();
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
- cur_p->sw_id_offset = (u32) new_skb;
+ cur_p->sw_id_offset = (phys_addr_t)new_skb;
+
+ ++q->rx_bd_ci;
+ q->rx_bd_ci %= RX_BD_NUM;
- ++lp->rx_bd_ci;
- lp->rx_bd_ci %= RX_BD_NUM;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+		/* Get relevant BD status value */
+ rmb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->rxq_bd_v[q->rx_bd_ci];
+#else
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
+#endif
+ numbdfree++;
}
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
+ q->rx_packets += packets;
+ q->rx_bytes += size;
+
+ if (tail_p) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, tail_p);
+#else
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+#endif
+ }
- if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ return numbdfree;
}
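On the 1G/2.5G MACs the Rx timestamp arrives in-band as the first 8 bytes of the buffer, which the loop above strips with skb_pull() before passing the skb up the stack. A condensed sketch of that framing, assuming the byte ordering shown in the driver code (big-endian seconds then nanoseconds on 1G/2.5G, nanoseconds then seconds in CPU order otherwise):

/* Sketch: strip an in-band Rx timestamp prepended to the frame data. */
#include <linux/skbuff.h>
#include <linux/ktime.h>
#include <asm/unaligned.h>

static void example_pull_rx_tstamp(struct sk_buff *skb, bool sec_first)
{
	u32 sec, nsec;

	if (sec_first) {
		sec = get_unaligned_be32(skb->data);
		nsec = get_unaligned_be32(skb->data + 4);
	} else {
		memcpy(&nsec, skb->data, 4);
		memcpy(&sec, skb->data + 4, 4);
	}

	skb_pull(skb, 8);	/* drop the 8-byte timestamp header */
	skb_hwtstamps(skb)->hwtstamp =
		ns_to_ktime((u64)sec * NSEC_PER_SEC + nsec);
}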
/**
- * axienet_tx_irq - Tx Done Isr.
- * @irq: irq number
- * @_ndev: net_device pointer
+ * xaxienet_rx_poll - Poll routine for rx packets (NAPI)
+ * @napi: napi structure pointer
+ * @quota: Max number of rx packets to be processed.
*
- * Return: IRQ_HANDLED for all cases.
+ * This is the poll routine for the Rx path.
+ * It processes packets up to the maximum 'quota' value.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * Return: number of packets received
*/
-static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+int xaxienet_rx_poll(struct napi_struct *napi, int quota)
{
- u32 cr;
- unsigned int status;
- struct net_device *ndev = _ndev;
+ struct net_device *ndev = napi->dev;
struct axienet_local *lp = netdev_priv(ndev);
-
- status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
+ int work_done = 0;
+ unsigned int status, cr;
+
+ int map = napi - lp->napi;
+
+ struct axienet_dma_q *q = lp->dq[map];
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ spin_lock(&q->rx_lock);
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ while ((status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) &&
+ (work_done < quota)) {
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+			dev_err(lp->dev, "Rx error 0x%x\n", status);
+ break;
+ }
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
}
- if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&q->rx_lock);
+#else
+ spin_lock(&q->rx_lock);
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ while ((status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) &&
+ (work_done < quota)) {
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+			dev_err(lp->dev, "Rx error 0x%x\n", status);
+ break;
+ }
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ }
+ spin_unlock(&q->rx_lock);
+#endif
- tasklet_schedule(&lp->dma_err_tasklet);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ if (work_done < quota) {
+ napi_complete(napi);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable the interrupts again */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ XMCDMA_RX_OFFSET);
+ cr |= (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ XMCDMA_RX_OFFSET, cr);
+#else
+ /* Enable the interrupts again */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+#endif
}
-out:
- return IRQ_HANDLED;
+
+ return work_done;
}
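The poll routine above follows the standard NAPI contract that the comment in axienet_open() alludes to: consume at most 'quota' descriptors with the Rx interrupt masked, and re-enable the interrupt only after napi_complete(), so a packet arriving in that window still triggers a fresh schedule. A stripped-down skeleton of that contract, where rx_work_pending(), process_one_rx_bd() and reenable_rx_irq() are hypothetical helpers standing in for the status/ack/enable register accesses:

/* Sketch of the NAPI poll contract; the three helpers are hypothetical. */
static int example_rx_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* Consume completed Rx BDs, never exceeding the budget. */
	while (done < budget && rx_work_pending(napi->dev))
		done += process_one_rx_bd(napi->dev);

	if (done < budget) {
		/* Work drained: leave polling mode, then unmask the Rx
		 * interrupt so the next packet reschedules NAPI.
		 */
		napi_complete(napi);
		reenable_rx_irq(napi->dev);
	}

	return done;
}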
/**
- * axienet_rx_irq - Rx Isr.
+ * axienet_err_irq - Axi Ethernet error irq.
* @irq: irq number
* @_ndev: net_device pointer
*
* Return: IRQ_HANDLED for all cases.
*
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
- * processing.
+ * This is the Axi Ethernet error ISR. It updates the Rx FIFO overrun and
+ * Rx frame rejected statistics.
*/
-static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+static irqreturn_t axienet_err_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
- status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
+ status = axienet_ior(lp, XAE_IS_OFFSET);
+ if (status & XAE_INT_RXFIFOOVR_MASK) {
+ ndev->stats.rx_fifo_errors++;
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXFIFOOVR_MASK);
}
- if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- tasklet_schedule(&lp->dma_err_tasklet);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ if (status & XAE_INT_RXRJECT_MASK) {
+ ndev->stats.rx_dropped++;
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
}
-out:
+
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(unsigned long data);
+static int axienet_mii_init(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret, mdio_mcreg;
+
+ mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+	/* Disable the MDIO interface till the Axi Ethernet reset is completed.
+	 * When we do an Axi Ethernet reset, it resets the complete core,
+	 * including the MDIO. If MDIO is not disabled when the reset process
+	 * is started, MDIO will be broken afterwards.
+	 */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
+ axienet_device_reset(ndev);
+ /* Enable the MDIO */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
/**
* axienet_open - Driver open routine.
@@ -925,33 +1510,37 @@ static void axienet_dma_err_handler(unsigned long data);
*/
static int axienet_open(struct net_device *ndev)
{
- int ret, mdio_mcreg;
+ int ret = 0, i = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct phy_device *phydev = NULL;
+ struct axienet_dma_q *q;
+	u32 reg;
+	int err;
dev_dbg(&ndev->dev, "axienet_open()\n");
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
- return ret;
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
- */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
- axienet_device_reset(ndev);
- /* Enable the MDIO */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- ret = axienet_mdio_wait_until_ready(lp);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G)
+ axienet_device_reset(ndev);
+ else
+ ret = axienet_mii_init(ndev);
if (ret < 0)
return ret;
if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0, lp->phy_mode);
+ if (lp->phy_mode == XAE_PHY_TYPE_GMII) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+ } else if (lp->phy_mode == XAE_PHY_TYPE_RGMII_2_0) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
+ } else if ((lp->axienet_config->mactype == XAXIENET_1G) ||
+ (lp->axienet_config->mactype == XAXIENET_2_5G)) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link,
+ lp->phy_flags,
+ lp->phy_interface);
+ }
if (!phydev)
dev_err(lp->dev, "of_phy_connect() failed\n");
@@ -959,27 +1548,185 @@ static int axienet_open(struct net_device *ndev)
phy_start(phydev);
}
- /* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ /* Enable tasklets for Axi DMA error handling */
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_mcdma_err_handler,
+ (unsigned long)lp->dq[i]);
+#else
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_dma_err_handler,
+ (unsigned long)lp->dq[i]);
+#endif
+
+ /* Enable NAPI scheduling before enabling Axi DMA Rx IRQ, or you
+ * might run into a race condition; the RX ISR disables IRQ processing
+ * before scheduling the NAPI function to complete the processing.
+ * If NAPI scheduling is (still) disabled at that time, no more RX IRQs
+ * will be processed as only the NAPI function re-enables them!
+ */
+ napi_enable(&lp->napi[i]);
+ }
+ for_each_tx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable interrupts for Axi MCDMA Tx */
+ ret = request_irq(q->tx_irq, axienet_mcdma_tx_irq,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+#else
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(q->tx_irq, axienet_tx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+#endif
+ }
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable interrupts for Axi MCDMA Rx */
+ ret = request_irq(q->rx_irq, axienet_mcdma_rx_irq,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+#else
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(q->rx_irq, axienet_rx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+#endif
+ }
+ }
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ INIT_WORK(&lp->tx_tstamp_work, axienet_tx_tstamp);
+ skb_queue_head_init(&lp->ptp_txq);
+
+ lp->ptp_rx_hw_pointer = 0;
+ lp->ptp_rx_sw_pointer = 0xff;
+
+ axienet_iow(lp, PTP_RX_CONTROL_OFFSET, PTP_RX_PACKET_CLEAR);
+
+ ret = request_irq(lp->ptp_rx_irq, axienet_ptp_rx_irq,
+ 0, "ptp_rx", ndev);
+ if (ret)
+ goto err_ptp_rx_irq;
+
+ ret = request_irq(lp->ptp_tx_irq, axienet_ptp_tx_irq,
+ 0, "ptp_tx", ndev);
+ if (ret)
+ goto err_ptp_rx_irq;
+ }
+#endif
+
+ if (lp->phy_mode == XXE_PHY_TYPE_USXGMII) {
+ netdev_dbg(ndev, "RX reg: 0x%x\n",
+ axienet_ior(lp, XXV_RCW1_OFFSET));
+ /* USXGMII setup at selected speed */
+ reg = axienet_ior(lp, XXV_USXGMII_AN_OFFSET);
+ reg &= ~USXGMII_RATE_MASK;
+ netdev_dbg(ndev, "usxgmii_rate %d\n", lp->usxgmii_rate);
+ switch (lp->usxgmii_rate) {
+ case SPEED_1000:
+ reg |= USXGMII_RATE_1G;
+ break;
+ case SPEED_2500:
+ reg |= USXGMII_RATE_2G5;
+ break;
+ case SPEED_10:
+ reg |= USXGMII_RATE_10M;
+ break;
+ case SPEED_100:
+ reg |= USXGMII_RATE_100M;
+ break;
+ case SPEED_5000:
+ reg |= USXGMII_RATE_5G;
+ break;
+ case SPEED_10000:
+ reg |= USXGMII_RATE_10G;
+ break;
+ default:
+ reg |= USXGMII_RATE_1G;
+ }
+ reg |= USXGMII_FD;
+ reg |= (USXGMII_EN | USXGMII_LINK_STS);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg);
+ reg |= USXGMII_AN_EN;
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg);
+		/* The AN restart bit should be cleared, set and then cleared
+		 * again per the spec, with a 1 ms delay, for a rising edge
+		 * trigger.
+		 */
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg & ~USXGMII_AN_RESTART);
+ mdelay(1);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg | USXGMII_AN_RESTART);
+ mdelay(1);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg & ~USXGMII_AN_RESTART);
+
+ /* Check block lock bit to make sure RX path is ok with
+ * USXGMII initialization.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ reg, (reg & XXV_RX_BLKLCK_MASK),
+ 100, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+			netdev_err(ndev, "%s: USXGMII Block lock bit not set\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ err = readl_poll_timeout(lp->regs + XXV_USXGMII_AN_STS_OFFSET,
+ reg, (reg & USXGMII_AN_STS_COMP_MASK),
+ 1000000, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+			netdev_err(ndev, "%s: USXGMII AN not complete\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ netdev_info(ndev, "USXGMII setup at %d\n", lp->usxgmii_rate);
+ }
- /* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
- if (ret)
- goto err_tx_irq;
- /* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
- if (ret)
- goto err_rx_irq;
+ if (!lp->eth_hasnobuf && (lp->axienet_config->mactype == XAXIENET_1G)) {
+ /* Enable interrupts for Axi Ethernet */
+ ret = request_irq(lp->eth_irq, axienet_err_irq, 0, ndev->name,
+ ndev);
+ if (ret)
+ goto err_eth_irq;
+ }
+ netif_tx_start_all_queues(ndev);
return 0;
+err_eth_irq:
+ while (i--) {
+ q = lp->dq[i];
+ free_irq(q->rx_irq, ndev);
+ }
+ i = lp->num_tx_queues;
err_rx_irq:
- free_irq(lp->tx_irq, ndev);
+ while (i--) {
+ q = lp->dq[i];
+ free_irq(q->tx_irq, ndev);
+ }
err_tx_irq:
+ for_each_rx_dma_queue(lp, i)
+ napi_disable(&lp->napi[i]);
+#ifdef CONFIG_XILINX_TSN_PTP
+err_ptp_rx_irq:
+#endif
if (phydev)
phy_disconnect(phydev);
- tasklet_kill(&lp->dma_err_tasklet);
+ phydev = NULL;
+ for_each_rx_dma_queue(lp, i)
+ tasklet_kill(&lp->dma_err_tasklet[i]);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
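The USXGMII autonegotiation restart in axienet_open() above drives the restart bit low-high-low with 1 ms settle times, since the core latches the request on a rising edge. Factored out, the pulse is simply the following (a sketch reusing the register and mask names from this driver):

/* Sketch: rising-edge AN restart pulse, per the sequence above. */
static void example_usxgmii_an_restart(struct axienet_local *lp, u32 reg)
{
	axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg & ~USXGMII_AN_RESTART);
	mdelay(1);			/* settle low before the edge */
	axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg | USXGMII_AN_RESTART);
	mdelay(1);			/* hold high past the rising edge */
	axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg & ~USXGMII_AN_RESTART);
}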
@@ -997,28 +1744,51 @@ err_tx_irq:
static int axienet_stop(struct net_device *ndev)
{
u32 cr;
+ u32 i;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
dev_dbg(&ndev->dev, "axienet_close()\n");
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_tx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ free_irq(q->tx_irq, ndev);
+ }
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi[i]);
+ tasklet_kill(&lp->dma_err_tasklet[i]);
+
+ free_irq(q->rx_irq, ndev);
+ }
+ }
- tasklet_kill(&lp->dma_err_tasklet);
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ free_irq(lp->ptp_tx_irq, ndev);
+ free_irq(lp->ptp_rx_irq, ndev);
+ }
+#endif
- free_irq(lp->tx_irq, ndev);
- free_irq(lp->rx_irq, ndev);
+ if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
+ free_irq(lp->eth_irq, ndev);
if (ndev->phydev)
phy_disconnect(ndev->phydev);
- axienet_dma_bd_release(ndev);
+ if (lp->temac_no != XAE_TEMAC2)
+ axienet_dma_bd_release(ndev);
return 0;
}
@@ -1060,15 +1830,209 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
static void axienet_poll_controller(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
- disable_irq(lp->tx_irq);
- disable_irq(lp->rx_irq);
- axienet_rx_irq(lp->tx_irq, ndev);
- axienet_tx_irq(lp->rx_irq, ndev);
- enable_irq(lp->tx_irq);
- enable_irq(lp->rx_irq);
+ int i;
+
+ for_each_tx_dma_queue(lp, i)
+ disable_irq(lp->dq[i]->tx_irq);
+ for_each_rx_dma_queue(lp, i)
+ disable_irq(lp->dq[i]->rx_irq);
+
+ for_each_rx_dma_queue(lp, i)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_rx_irq(lp->dq[i]->rx_irq, ndev);
+#else
+ axienet_rx_irq(lp->dq[i]->rx_irq, ndev);
+#endif
+ for_each_tx_dma_queue(lp, i)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_tx_irq(lp->dq[i]->tx_irq, ndev);
+#else
+ axienet_tx_irq(lp->dq[i]->tx_irq, ndev);
+#endif
+ for_each_tx_dma_queue(lp, i)
+ enable_irq(lp->dq[i]->tx_irq);
+ for_each_rx_dma_queue(lp, i)
+ enable_irq(lp->dq[i]->rx_irq);
}
#endif
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+/**
+ * axienet_set_timestamp_mode - sets up the hardware for the requested mode
+ * @lp: Pointer to axienet local structure
+ * @config: the hwtstamp configuration requested
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_set_timestamp_mode(struct axienet_local *lp,
+ struct hwtstamp_config *config)
+{
+ u32 regval;
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ if (config->tx_type < HWTSTAMP_TX_OFF ||
+ config->tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
+ return -ERANGE;
+
+ lp->ptp_ts_type = config->tx_type;
+
+ /* On RX always timestamp everything */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+ return 0;
+ }
+#endif
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Read the current value in the MAC TX CTRL register */
+ regval = axienet_ior(lp, XAE_TC_OFFSET);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ regval &= ~XAE_TC_INBAND1588_MASK;
+ break;
+ case HWTSTAMP_TX_ON:
+ config->tx_type = HWTSTAMP_TX_ON;
+ regval |= XAE_TC_INBAND1588_MASK;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ regval |= XAE_TC_INBAND1588_MASK;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_TC_OFFSET, regval);
+
+ /* Read the current value in the MAC RX RCW1 register */
+ regval = axienet_ior(lp, XAE_RCW1_OFFSET);
+
+ /* On RX always timestamp everything */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ regval &= ~XAE_RCW1_INBAND1588_MASK;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ regval |= XAE_RCW1_INBAND1588_MASK;
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_RCW1_OFFSET, regval);
+
+ return 0;
+}
+
+/**
+ * axienet_set_ts_config - user entry point for timestamp mode
+ * @lp: Pointer to axienet local structure
+ * @ifr: ioctl data
+ *
+ * Set the hardware to the requested mode. If unsupported, return an error
+ * with no changes. Otherwise, store the mode for future reference.
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_set_ts_config(struct axienet_local *lp, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = axienet_set_timestamp_mode(lp, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&lp->tstamp_config, &config, sizeof(lp->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * axienet_get_ts_config - return the current timestamp configuration
+ * to the user
+ * @lp: pointer to axienet local structure
+ * @ifr: ioctl data
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_get_ts_config(struct axienet_local *lp, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &lp->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+#endif
+
+/* ioctl entry point: MII and timestamp configuration requests */
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ struct axienet_local *lp = netdev_priv(dev);
+#endif
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ case SIOCSHWTSTAMP:
+ return axienet_set_ts_config(lp, rq);
+ case SIOCGHWTSTAMP:
+ return axienet_get_ts_config(lp, rq);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOCCHIOCTL:
+ return axienet_set_schedule(dev, rq->ifr_data);
+ case SIOC_GET_SCHED:
+ return axienet_get_schedule(dev, rq->ifr_data);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBR
+ case SIOC_PREEMPTION_CFG:
+ return axienet_preemption(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_CTRL:
+ return axienet_preemption_ctrl(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_STS:
+ return axienet_preemption_sts(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_COUNTER:
+ return axienet_preemption_cnt(dev, rq->ifr_data);
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOC_QBU_USER_OVERRIDE:
+ return axienet_qbu_user_override(dev, rq->ifr_data);
+ case SIOC_QBU_STS:
+ return axienet_qbu_sts(dev, rq->ifr_data);
+#endif
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
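The SIOCSHWTSTAMP branch above is driven from user space with a struct hwtstamp_config wrapped in an ifreq; the matching capabilities are reported through get_ts_info() (e.g. via "ethtool -T ethX"). A minimal user-space sketch using only standard Linux UAPI, with error handling trimmed; the interface name "eth0" is an assumption:

/* User-space sketch: request hardware timestamping via SIOCSHWTSTAMP. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tstamp(int sock /* e.g. socket(AF_INET, SOCK_DGRAM, 0) */)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* The driver may coerce the request (e.g. rx_filter to
	 * HWTSTAMP_FILTER_ALL) and copies the result back into cfg.
	 */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}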
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
@@ -1077,6 +2041,7 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = axienet_set_multicast_list,
+ .ndo_do_ioctl = axienet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
#endif
@@ -1151,18 +2116,18 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[15] = axienet_ior(lp, XAE_TC_OFFSET);
data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
- data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
+ data[18] = axienet_ior(lp, XAE_RMFC_OFFSET);
data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
+ data[23] = axienet_ior(lp, XAE_TEMAC_IS_OFFSET);
+ data[24] = axienet_ior(lp, XAE_TEMAC_IP_OFFSET);
+ data[25] = axienet_ior(lp, XAE_TEMAC_IE_OFFSET);
+ data[26] = axienet_ior(lp, XAE_TEMAC_IC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
- data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
+ data[29] = axienet_ior(lp, XAE_FMC_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}
@@ -1182,6 +2147,7 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
{
u32 regval;
struct axienet_local *lp = netdev_priv(ndev);
+
epauseparm->autoneg = 0;
regval = axienet_ior(lp, XAE_FCC_OFFSET);
epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
@@ -1192,7 +2158,7 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparm:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
@@ -1243,12 +2209,24 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
{
u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+ struct axienet_dma_q *q;
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+
+ regval = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
+ for_each_tx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ regval = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
return 0;
}
@@ -1303,6 +2281,32 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+/**
+ * axienet_ethtools_get_ts_info - Get h/w timestamping capabilities.
+ * @ndev: Pointer to net_device structure
+ * @info: Pointer to ethtool_ts_info structure
+ *
+ * Return: 0, on success, Non-zero error value on failure.
+ */
+static int axienet_ethtools_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = 0;
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ info->phc_index = axienet_phc_index;
+#endif
+ return 0;
+}
+#endif
+
static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
@@ -1312,142 +2316,470 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ .get_ts_info = axienet_ethtools_get_ts_info,
+#endif
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ .get_sset_count = axienet_sset_count,
+ .get_ethtool_stats = axienet_get_stats,
+ .get_strings = axienet_strings,
+#endif
};
-/**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data: Data passed
- *
- * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
- * Tx/Rx BDs.
- */
-static void axienet_dma_err_handler(unsigned long data)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+static int __maybe_unused axienet_mcdma_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev)
{
- u32 axienet_status;
- u32 cr, i;
- int mdio_mcreg;
- struct axienet_local *lp = (struct axienet_local *) data;
- struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+ int i, ret = 0;
+ struct axienet_dma_q *q;
+ struct device_node *np;
+ struct resource dmares;
+ const char *str;
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- axienet_mdio_wait_until_ready(lp);
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
- */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
- ~XAE_MDIO_MC_MDIOEN_MASK));
+ ret = of_property_count_strings(pdev->dev.of_node, "xlnx,channel-ids");
+ if (ret < 0)
+ return -EINVAL;
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ for_each_rx_dma_queue(lp, i) {
+		q = kzalloc(sizeof(*q), GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+
+ /* parent */
+ q->lp = lp;
+ lp->dq[i] = q;
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "xlnx,channel-ids", i,
+ &str);
+ ret = kstrtou16(str, 16, &q->chan_id);
+ lp->qnum[i] = i;
+ lp->chan_num[i] = q->chan_id;
+ }
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- axienet_mdio_wait_until_ready(lp);
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+ 0);
+	if (!np) {
+		dev_err(&pdev->dev, "could not find DMA node\n");
+		return -ENODEV;
+	}
- for (i = 0; i < TX_BD_NUM; i++) {
- cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl &
- XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
- cur_p->phys = 0;
- cur_p->cntrl = 0;
- cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
- cur_p->sw_id_offset = 0;
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ return ret;
}
- for (i = 0; i < RX_BD_NUM; i++) {
- cur_p = &lp->rx_bd_v[i];
- cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
+ ret = of_property_read_u8(np, "xlnx,addrwidth", (u8 *)&lp->dma_mask);
+ if (ret < 0 || lp->dma_mask < XAE_DMA_MASK_MIN ||
+ lp->dma_mask > XAE_DMA_MASK_MAX) {
+ dev_info(&pdev->dev, "missing/invalid xlnx,addrwidth property, using default\n");
+ lp->dma_mask = XAE_DMA_MASK_MIN;
+ }
+
+ lp->mcdma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (IS_ERR(lp->mcdma_regs)) {
+		dev_err(&pdev->dev, "ioremap failed for the dma\n");
+ ret = PTR_ERR(lp->mcdma_regs);
+ return ret;
+ }
+
+ axienet_mcdma_tx_probe(pdev, np, lp);
+ axienet_mcdma_rx_probe(pdev, lp, ndev);
+
+ return 0;
+}
+#endif
+
+static int __maybe_unused axienet_dma_probe(struct platform_device *pdev,
+ struct net_device *ndev)
+{
+ int i, ret;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ struct device_node *np = NULL;
+ struct resource dmares;
+#ifdef CONFIG_XILINX_TSN
+ char dma_name[10];
+#endif
+
+ for_each_rx_dma_queue(lp, i) {
+		q = kzalloc(sizeof(*q), GFP_KERNEL);
+		if (!q)
+			return -ENOMEM;
+
+ /* parent */
+ q->lp = lp;
+
+ lp->dq[i] = q;
+ }
+
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ /* TODO handle error ret */
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+ i);
+ if (np) {
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret >= 0)
+ q->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ else
+ return -ENODEV;
+ q->eth_hasdre = of_property_read_bool(np,
+ "xlnx,include-dre");
+ ret = of_property_read_u8(np, "xlnx,addrwidth",
+ (u8 *)&lp->dma_mask);
+ if (ret < 0 || lp->dma_mask < XAE_DMA_MASK_MIN ||
+ lp->dma_mask > XAE_DMA_MASK_MAX) {
+ dev_info(&pdev->dev, "missing/invalid xlnx,addrwidth property, using default\n");
+ lp->dma_mask = XAE_DMA_MASK_MIN;
+ }
+
+ } else {
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_XILINX_TSN
+ if (lp->is_tsn) {
+ for_each_rx_dma_queue(lp, i) {
+ sprintf(dma_name, "dma%d_tx", i);
+ lp->dq[i]->tx_irq = platform_get_irq_byname(pdev,
+ dma_name);
+ sprintf(dma_name, "dma%d_rx", i);
+ lp->dq[i]->rx_irq = platform_get_irq_byname(pdev,
+ dma_name);
+ pr_info("lp->dq[%d]->tx_irq %d\n", i,
+ lp->dq[i]->tx_irq);
+ pr_info("lp->dq[%d]->rx_irq %d\n", i,
+ lp->dq[i]->rx_irq);
+ }
+ } else {
+#endif /* Remove once the axienet device tree IRQ names comply with the DMA naming */
+ for_each_rx_dma_queue(lp, i) {
+ lp->dq[i]->tx_irq = irq_of_parse_and_map(np, 0);
+ lp->dq[i]->rx_irq = irq_of_parse_and_map(np, 1);
+ }
+#ifdef CONFIG_XILINX_TSN
+ }
+#endif
+
+ of_node_put(np);
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+
+ spin_lock_init(&q->tx_lock);
+ spin_lock_init(&q->rx_lock);
}
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
+ for_each_rx_dma_queue(lp, i) {
+ netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+ XAXIENET_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+static int axienet_clk_init(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **ref_clk, struct clk **tmpclk)
+{
+ int err;
+
+ *tmpclk = NULL;
+
+ /* The "ethernet_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
+ *axi_aclk = devm_clk_get(&pdev->dev, "ethernet_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+
+ *axi_aclk = devm_clk_get(&pdev->dev, "s_axi_lite_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+ *axi_aclk = NULL;
+ }
+
+ } else {
+ dev_warn(&pdev->dev, "ethernet_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ *axis_clk = devm_clk_get(&pdev->dev, "axis_clk");
+ if (IS_ERR(*axis_clk)) {
+ if (PTR_ERR(*axis_clk) != -ENOENT) {
+ err = PTR_ERR(*axis_clk);
+ return err;
+ }
+ *axis_clk = NULL;
+ }
+
+ *ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(*ref_clk)) {
+ if (PTR_ERR(*ref_clk) != -ENOENT) {
+ err = PTR_ERR(*ref_clk);
+ return err;
+ }
+ *ref_clk = NULL;
+ }
+
+ err = clk_prepare_enable(*axi_aclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_aclk/ethernet_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axis_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axis_clk (%d)\n", err);
+ goto err_disable_axi_aclk;
+ }
+
+ err = clk_prepare_enable(*ref_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable ref_clk (%d)\n", err);
+ goto err_disable_axis_clk;
+ }
+
+ return 0;
+
+err_disable_axis_clk:
+ clk_disable_unprepare(*axis_clk);
+err_disable_axi_aclk:
+ clk_disable_unprepare(*axi_aclk);
+
+ return err;
+}
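The repeated devm_clk_get() + -ENOENT handling above treats each clock as optional while still propagating real failures such as -EPROBE_DEFER. On kernels that provide devm_clk_get_optional(), the same policy collapses to a few lines (a sketch, not part of this patch):

/* Sketch: optional clock handling with devm_clk_get_optional(), which
 * returns NULL (not ERR_PTR(-ENOENT)) when the clock is absent; a NULL
 * clk is a no-op for clk_prepare_enable().
 */
#include <linux/clk.h>

static int example_get_optional_clk(struct device *dev, const char *name,
				    struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real error, incl. -EPROBE_DEFER */

	*out = clk;
	return clk_prepare_enable(clk);
}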
+
+static int axienet_dma_clk_init(struct platform_device *pdev)
+{
+ int err;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ /* The "dma_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ lp->dma_tx_clk = devm_clk_get(&pdev->dev, "dma_clk");
+ if (IS_ERR(lp->dma_tx_clk)) {
+ if (PTR_ERR(lp->dma_tx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_tx_clk);
+ return err;
+ }
+
+ lp->dma_tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(lp->dma_tx_clk)) {
+ if (PTR_ERR(lp->dma_tx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_tx_clk);
+ return err;
+ }
+ lp->dma_tx_clk = NULL;
+ }
+ } else {
+ dev_warn(&pdev->dev, "dma_clk is deprecated and will be removed sometime in the future\n");
+ }
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ lp->dma_rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(lp->dma_rx_clk)) {
+ if (PTR_ERR(lp->dma_rx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_rx_clk);
+ return err;
+ }
+ lp->dma_rx_clk = NULL;
+ }
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ lp->dma_sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(lp->dma_sg_clk)) {
+ if (PTR_ERR(lp->dma_sg_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_sg_clk);
+ return err;
+ }
+ lp->dma_sg_clk = NULL;
+ }
+
+ err = clk_prepare_enable(lp->dma_tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk/dma_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(lp->dma_rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(lp->dma_sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(lp->dma_rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(lp->dma_tx_clk);
+
+ return err;
+}
+
+static void axienet_clk_disable(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct axienet_local *lp = netdev_priv(ndev);
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
+ clk_disable_unprepare(lp->dma_sg_clk);
+ clk_disable_unprepare(lp->dma_tx_clk);
+ clk_disable_unprepare(lp->dma_rx_clk);
+ clk_disable_unprepare(lp->eth_sclk);
+ clk_disable_unprepare(lp->eth_refclk);
+ clk_disable_unprepare(lp->eth_dclk);
+ clk_disable_unprepare(lp->aclk);
+}
+
+static int xxvenet_clk_init(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **tmpclk, struct clk **dclk)
+{
+ int err;
+
+ *tmpclk = NULL;
+
+ /* The "ethernet_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- axienet_set_mac_address(ndev, NULL);
- axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ *axi_aclk = devm_clk_get(&pdev->dev, "ethernet_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+
+ *axi_aclk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+ *axi_aclk = NULL;
+ }
+
+ } else {
+ dev_warn(&pdev->dev, "ethernet_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ *axis_clk = devm_clk_get(&pdev->dev, "rx_core_clk");
+ if (IS_ERR(*axis_clk)) {
+ if (PTR_ERR(*axis_clk) != -ENOENT) {
+ err = PTR_ERR(*axis_clk);
+ return err;
+ }
+ *axis_clk = NULL;
+ }
+
+ *dclk = devm_clk_get(&pdev->dev, "dclk");
+ if (IS_ERR(*dclk)) {
+ if (PTR_ERR(*dclk) != -ENOENT) {
+ err = PTR_ERR(*dclk);
+ return err;
+ }
+ *dclk = NULL;
+ }
+
+ err = clk_prepare_enable(*axi_aclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk/ethernet_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axis_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axis_clk (%d)\n", err);
+ goto err_disable_axi_aclk;
+ }
+
+ err = clk_prepare_enable(*dclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dclk (%d)\n", err);
+ goto err_disable_axis_clk;
+ }
+
+ return 0;
+
+err_disable_axis_clk:
+ clk_disable_unprepare(*axis_clk);
+err_disable_axi_aclk:
+ clk_disable_unprepare(*axi_aclk);
+
+ return err;
}
+static const struct axienet_config axienet_1g_config = {
+ .mactype = XAXIENET_1G,
+ .setoptions = axienet_setoptions,
+ .clk_init = axienet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_2_5g_config = {
+ .mactype = XAXIENET_2_5G,
+ .setoptions = axienet_setoptions,
+ .clk_init = axienet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_10g_config = {
+ .mactype = XAXIENET_LEGACY_10G,
+ .setoptions = axienet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_10g25g_config = {
+ .mactype = XAXIENET_10G_25G,
+ .setoptions = xxvenet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = XXV_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_usxgmii_config = {
+ .mactype = XAXIENET_10G_25G,
+ .setoptions = xxvenet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = 0,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id axienet_of_match[] = {
+ { .compatible = "xlnx,axi-ethernet-1.00.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-ethernet-1.01.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-ethernet-2.01.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-2_5-gig-ethernet-1.0",
+ .data = &axienet_2_5g_config},
+ { .compatible = "xlnx,ten-gig-eth-mac", .data = &axienet_10g_config},
+ { .compatible = "xlnx,xxv-ethernet-1.0",
+ .data = &axienet_10g25g_config},
+ { .compatible = "xlnx,tsn-ethernet-1.00.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,xxv-usxgmii-ethernet-1.0",
+ .data = &axienet_usxgmii_config},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, axienet_of_match);
+
/**
* axienet_probe - Axi Ethernet probe function.
* @pdev: Pointer to platform device structure.
@@ -1462,15 +2794,40 @@ static void axienet_dma_err_handler(unsigned long data)
*/
static int axienet_probe(struct platform_device *pdev)
{
+	/* Default clock init; overridden below from the matched config */
+	int (*clk_init_fn)(struct platform_device *pdev,
+			   struct clk **axi_aclk, struct clk **axis_clk,
+			   struct clk **ref_clk, struct clk **tmpclk) =
+			   axienet_clk_init;
int ret;
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
struct device_node *np;
+#endif
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
- struct resource *ethres, dmares;
+ struct resource *ethres;
u32 value;
+ u16 num_queues = XAE_MAX_QUEUES;
+ bool slave = false;
+ bool is_tsn = false;
+
+ is_tsn = of_property_read_bool(pdev->dev.of_node, "xlnx,tsn");
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-queues",
+ &num_queues);
+ if (ret) {
+ if (!is_tsn) {
+#ifndef CONFIG_AXIENET_HAS_MCDMA
+ num_queues = 1;
+#endif
+ }
+ }
+#ifdef CONFIG_XILINX_TSN
+ if (is_tsn && (num_queues < XAE_TSN_MIN_QUEUES ||
+ num_queues > XAE_MAX_QUEUES))
+ num_queues = XAE_MAX_QUEUES;
+#endif
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = alloc_etherdev_mq(sizeof(*lp), num_queues);
if (!ndev)
return -ENOMEM;
@@ -1490,34 +2847,66 @@ static int axienet_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->num_tx_queues = num_queues;
+ lp->num_rx_queues = num_queues;
+ lp->is_tsn = is_tsn;
+
+#ifdef CONFIG_XILINX_TSN
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &lp->num_tc);
+ if (ret || (lp->num_tc != 2 && lp->num_tc != 3))
+ lp->num_tc = XAE_MAX_TSN_TC;
+#endif
+
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+
if (IS_ERR(lp->regs)) {
- dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = PTR_ERR(lp->regs);
goto free_netdev;
}
+#ifdef CONFIG_XILINX_TSN
+ slave = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,tsn-slave");
+ if (slave)
+ lp->temac_no = XAE_TEMAC2;
+ else
+ lp->temac_no = XAE_TEMAC1;
+#endif
+
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(axienet_of_match, pdev->dev.of_node);
+ if (match && match->data) {
+ lp->axienet_config = match->data;
+			clk_init_fn = lp->axienet_config->clk_init;
+ }
+ }
+
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
if (!ret) {
+ dev_info(&pdev->dev, "TX_CSUM %d\n", value);
+
switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
case 2:
lp->csum_offload_on_tx_path =
XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
default:
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
@@ -1525,6 +2914,8 @@ static int axienet_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
+ dev_info(&pdev->dev, "RX_CSUM %d\n", value);
+
switch (value) {
case 1:
lp->csum_offload_on_rx_path =
@@ -1548,78 +2939,138 @@ static int axienet_probe(struct platform_device *pdev)
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
- /* Start with the proprietary, and broken phy_type */
- ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
- if (!ret) {
- netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
- switch (value) {
- case XAE_PHY_TYPE_MII:
- lp->phy_mode = PHY_INTERFACE_MODE_MII;
- break;
- case XAE_PHY_TYPE_GMII:
- lp->phy_mode = PHY_INTERFACE_MODE_GMII;
- break;
- case XAE_PHY_TYPE_RGMII_2_0:
- lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- break;
- case XAE_PHY_TYPE_SGMII:
- lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
- break;
- case XAE_PHY_TYPE_1000BASE_X:
- lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
- break;
- default:
- ret = -EINVAL;
+	/* The phy_mode is optional; when it is not specified it must not
+	 * alter the driver behavior, so default it to an invalid value.
+	 */
+ lp->phy_mode = ~0;
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_mode);
+
+ /* Set default USXGMII rate */
+ lp->usxgmii_rate = SPEED_1000;
+ of_property_read_u32(pdev->dev.of_node, "xlnx,usxgmii-rate",
+ &lp->usxgmii_rate);
+
+ lp->eth_hasnobuf = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,eth-hasnobuf");
+ lp->eth_hasptp = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,eth-hasptp");
+
+ if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
+ lp->eth_irq = platform_get_irq(pdev, 0);
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ struct resource txtsres, rxtsres;
+
+ /* Find AXI Stream FIFO */
+ np = of_parse_phandle(pdev->dev.of_node, "axififo-connected",
+ 0);
+		if (!np) {
+			dev_err(&pdev->dev, "could not find TX Timestamp FIFO\n");
+			ret = -ENODEV;
goto free_netdev;
}
- } else {
- lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if (lp->phy_mode < 0) {
- ret = -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &txtsres);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get Tx Timestamp resource\n");
goto free_netdev;
}
- }
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto free_netdev;
- }
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev, "unable to get DMA resource\n");
+ lp->tx_ts_regs = devm_ioremap_resource(&pdev->dev, &txtsres);
+ if (IS_ERR(lp->tx_ts_regs)) {
+ dev_err(&pdev->dev, "could not map Tx Timestamp regs\n");
+ ret = PTR_ERR(lp->tx_ts_regs);
+ goto free_netdev;
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ np = of_parse_phandle(pdev->dev.of_node,
+ "xlnx,rxtsfifo", 0);
+			if (!np) {
+				dev_err(&pdev->dev,
+					"couldn't find rx-timestamp FIFO\n");
+				ret = -ENODEV;
+				goto free_netdev;
+ }
+
+ ret = of_address_to_resource(np, 0, &rxtsres);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get rx-timestamp resource\n");
+ goto free_netdev;
+ }
+
+ lp->rx_ts_regs = devm_ioremap_resource(&pdev->dev,
+ &rxtsres);
+ if (IS_ERR(lp->rx_ts_regs)) {
+ dev_err(&pdev->dev,
+ "couldn't map rx-timestamp regs\n");
+ ret = PTR_ERR(lp->rx_ts_regs);
+ goto free_netdev;
+ }
+ lp->tx_ptpheader = devm_kzalloc(&pdev->dev,
+ XXVENET_TS_HEADER_LEN,
+ GFP_KERNEL);
+ }
+
of_node_put(np);
- goto free_netdev;
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
- if (IS_ERR(lp->dma_regs)) {
- dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- of_node_put(np);
- goto free_netdev;
+#endif
+
+ if (!slave) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ ret = axienet_mcdma_probe(pdev, lp, ndev);
+#else
+ ret = axienet_dma_probe(pdev, ndev);
+#endif
+ if (ret) {
+			dev_err(&pdev->dev, "Getting DMA resource failed\n");
+ goto free_netdev;
+ }
+
+ if (dma_set_mask_and_coherent(lp->dev, DMA_BIT_MASK(lp->dma_mask)) != 0) {
+ dev_warn(&pdev->dev, "default to %d-bit dma mask\n", XAE_DMA_MASK_MIN);
+ if (dma_set_mask_and_coherent(lp->dev, DMA_BIT_MASK(XAE_DMA_MASK_MIN)) != 0) {
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed, aborting\n");
+ goto free_netdev;
+ }
+ }
+
+ ret = axienet_dma_clk_init(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "DMA clock init failed %d\n", ret);
+ goto free_netdev;
+ }
}
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
- if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto free_netdev;
+
+	ret = clk_init_fn(pdev, &lp->aclk, &lp->eth_sclk,
+			  &lp->eth_refclk, &lp->eth_dclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Ethernet clock init failed %d\n", ret);
+ goto err_disable_clk;
}
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
if (IS_ERR(mac_addr)) {
dev_err(&pdev->dev, "could not find MAC address\n");
- goto free_netdev;
+ goto err_disable_clk;
}
axienet_set_mac_address(ndev, mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ ret = of_get_phy_mode(pdev->dev.of_node);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "couldn't find phy i/f\n");
+ lp->phy_interface = ret;
+ if (lp->phy_mode == XAE_PHY_TYPE_1000BASE_X)
+ lp->phy_flags = XAE_PHY_TYPE_1000BASE_X;
+
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
ret = axienet_mdio_setup(lp, pdev->dev.of_node);
@@ -1627,14 +3078,52 @@ static int axienet_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "error registering MDIO bus\n");
}
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Create sysfs file entries for the device */
+ ret = axeinet_mcdma_create_sysfs(&lp->dev->kobj);
+ if (ret < 0) {
+ dev_err(lp->dev, "unable to create sysfs entries\n");
+		goto err_disable_clk;
+ }
+#endif
+
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto free_netdev;
+ axienet_mdio_teardown(lp);
+ goto err_disable_clk;
}
- return 0;
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ lp->ptp_rx_irq = platform_get_irq_byname(pdev, "ptp_rx");
+
+ lp->ptp_tx_irq = platform_get_irq_byname(pdev, "ptp_tx");
+
+ lp->qbv_irq = platform_get_irq_byname(pdev, "qbv_irq");
+
+ pr_debug("ptp RX irq: %d\n", lp->ptp_rx_irq);
+ pr_debug("ptp TX irq: %d\n", lp->ptp_tx_irq);
+ pr_debug("qbv_irq: %d\n", lp->qbv_irq);
+
+ spin_lock_init(&lp->ptp_tx_lock);
+
+ if (lp->temac_no == XAE_TEMAC1) {
+ axienet_ptp_timer_probe(lp->regs + XAE_RTC_OFFSET,
+ pdev);
+ /* enable VLAN */
+ lp->options |= XAE_OPTION_VLAN;
+ axienet_setoptions(lp->ndev, lp->options);
+#ifdef CONFIG_XILINX_TSN_QBV
+ axienet_qbv_init(ndev);
+#endif
+ }
+ }
+#endif
+ return 0;
+err_disable_clk:
+ axienet_clk_disable(pdev);
free_netdev:
free_netdev(ndev);
@@ -1645,10 +3134,27 @@ static int axienet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
+ int i;
- axienet_mdio_teardown(lp);
+ if (lp->mii_bus)
+ axienet_mdio_teardown(lp);
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ axienet_ptp_timer_remove(lp->timer_priv);
+#ifdef CONFIG_XILINX_TSN_QBV
+ axienet_qbv_remove(ndev);
+#endif
+#endif
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_rx_dma_queue(lp, i)
+ netif_napi_del(&lp->napi[i]);
+ }
unregister_netdev(ndev);
+ axienet_clk_disable(pdev);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axeinet_mcdma_remove_sysfs(&lp->dev->kobj);
+#endif
of_node_put(lp->phy_node);
lp->phy_node = NULL;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c
new file mode 100644
index 000000000000..9c2056e1b592
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Xilinx AXI Ethernet (MCDMA programming)
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc.
+ * Copyright (C) 2018 Xilinx, Inc. All rights reserved.
+ *
+ * This file contains helper functions for AXI MCDMA TX and RX programming.
+ */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+
+#include "xilinx_axienet.h"
+
+struct axienet_stat {
+ const char *name;
+};
+
+static struct axienet_stat axienet_get_tx_strings_stats[] = {
+ { "txq0_packets" },
+ { "txq0_bytes" },
+ { "txq1_packets" },
+ { "txq1_bytes" },
+ { "txq2_packets" },
+ { "txq2_bytes" },
+ { "txq3_packets" },
+ { "txq3_bytes" },
+ { "txq4_packets" },
+ { "txq4_bytes" },
+ { "txq5_packets" },
+ { "txq5_bytes" },
+ { "txq6_packets" },
+ { "txq6_bytes" },
+ { "txq7_packets" },
+ { "txq7_bytes" },
+ { "txq8_packets" },
+ { "txq8_bytes" },
+ { "txq9_packets" },
+ { "txq9_bytes" },
+ { "txq10_packets" },
+ { "txq10_bytes" },
+ { "txq11_packets" },
+ { "txq11_bytes" },
+ { "txq12_packets" },
+ { "txq12_bytes" },
+ { "txq13_packets" },
+ { "txq13_bytes" },
+ { "txq14_packets" },
+ { "txq14_bytes" },
+ { "txq15_packets" },
+ { "txq15_bytes" },
+};
+
+static struct axienet_stat axienet_get_rx_strings_stats[] = {
+ { "rxq0_packets" },
+ { "rxq0_bytes" },
+ { "rxq1_packets" },
+ { "rxq1_bytes" },
+ { "rxq2_packets" },
+ { "rxq2_bytes" },
+ { "rxq3_packets" },
+ { "rxq3_bytes" },
+ { "rxq4_packets" },
+ { "rxq4_bytes" },
+ { "rxq5_packets" },
+ { "rxq5_bytes" },
+ { "rxq6_packets" },
+ { "rxq6_bytes" },
+ { "rxq7_packets" },
+ { "rxq7_bytes" },
+ { "rxq8_packets" },
+ { "rxq8_bytes" },
+ { "rxq9_packets" },
+ { "rxq9_bytes" },
+ { "rxq10_packets" },
+ { "rxq10_bytes" },
+ { "rxq11_packets" },
+ { "rxq11_bytes" },
+ { "rxq12_packets" },
+ { "rxq12_bytes" },
+ { "rxq13_packets" },
+ { "rxq13_bytes" },
+ { "rxq14_packets" },
+ { "rxq14_bytes" },
+ { "rxq15_packets" },
+ { "rxq15_bytes" },
+};
+
+/**
+ * axienet_mcdma_tx_bd_free - Release MCDMA Tx buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_mcdma_tx_q_init.
+ */
+void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ if (q->txq_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->txq_bd_v) * TX_BD_NUM,
+ q->txq_bd_v,
+ q->tx_bd_p);
+ }
+ if (q->tx_bufs) {
+ dma_free_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * TX_BD_NUM,
+ q->tx_bufs,
+ q->tx_bufs_dma);
+ }
+}
+
+/**
+ * axienet_mcdma_rx_bd_free - Release MCDMA Rx buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_mcdma_rx_q_init.
+ */
+void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ dma_unmap_single(ndev->dev.parent, q->rxq_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (q->rxq_bd_v[i].sw_id_offset));
+ }
+
+ if (q->rxq_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->rxq_bd_v) * RX_BD_NUM,
+ q->rxq_bd_v,
+ q->rx_bd_p);
+ }
+}
+
+/**
+ * axienet_mcdma_tx_q_init - Setup buffer descriptor rings for individual Axi
+ * MCDMA-Tx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success; -ENOMEM on failure
+ *
+ * This function is a helper function to axienet_dma_bd_init.
+ */
+int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ u32 cr, chan_en;
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+
+ q->txq_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->txq_bd_v) * TX_BD_NUM,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->txq_bd_v)
+ goto out;
+
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * TX_BD_NUM,
+ &q->tx_bufs_dma,
+ GFP_KERNEL);
+ if (!q->tx_bufs)
+ goto out;
+
+ for (i = 0; i < TX_BD_NUM; i++)
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
+ }
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ q->txq_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->txq_bd_v) *
+ ((i + 1) % TX_BD_NUM);
+ }
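+ /* Illustration (assumption for exposition, not driver code): the
+ * modulo in the loop above links the BDs into a ring. If TX_BD_NUM
+ * were 64, BD 62's next field would point at BD 63 and BD 63's next
+ * would wrap back to BD 0, so the channel cycles the ring endlessly.
+ */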
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XMCDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
+ q->tx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
+
+ return 0;
+out:
+ for_each_tx_dma_queue(lp, i) {
+ axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
+ }
+ return -ENOMEM;
+}
+
+/**
+ * axienet_mcdma_rx_q_init - Setup buffer descriptor rings for individual Axi
+ * MCDMA-Rx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0 on success; -ENOMEM on failure
+ *
+ * This function is a helper function to axienet_dma_bd_init.
+ */
+int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ u32 cr, chan_en;
+ int i;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->rx_bd_ci = 0;
+ q->rx_offset = XMCDMA_CHAN_RX_OFFSET;
+
+ q->rxq_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->rxq_bd_v) * RX_BD_NUM,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rxq_bd_v)
+ goto out;
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ q->rxq_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rxq_bd_v) *
+ ((i + 1) % RX_BD_NUM);
+
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ goto out;
+
+ /* Ensure that the skb is completely updated
+ * prior to mapping it for DMA
+ */
+ wmb();
+
+ q->rxq_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rxq_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ q->rxq_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XMCDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XMCDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
+ (RX_BD_NUM - 1)));
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
+
+ return 0;
+
+out:
+ for_each_rx_dma_queue(lp, i) {
+ axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
+ }
+ return -ENOMEM;
+}
+
+static inline int get_mcdma_tx_q(struct axienet_local *lp, u32 chan_id)
+{
+ int i;
+
+ for_each_tx_dma_queue(lp, i) {
+ if (chan_id == lp->chan_num[i])
+ return lp->qnum[i];
+ }
+
+ return -ENODEV;
+}
+
+static inline int get_mcdma_rx_q(struct axienet_local *lp, u32 chan_id)
+{
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ if (chan_id == lp->chan_num[i])
+ return lp->qnum[i];
+ }
+
+ return -ENODEV;
+}
+
+static inline int map_dma_q_txirq(int irq, struct axienet_local *lp)
+{
+ int i, chan_sermask;
+ u16 chan_id = 1;
+ struct axienet_dma_q *q = lp->dq[0];
+
+ chan_sermask = axienet_dma_in32(q, XMCDMA_TXINT_SER_OFFSET);
+
+ for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
+ i <<= 1, chan_id++) {
+ if (chan_sermask & i)
+ return chan_id;
+ }
+
+ return -ENODEV;
+}
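+
+/* Worked example (illustrative only): if the serviced-interrupt register
+ * reads 0x4, the scan in map_dma_q_txirq() tests bits 0x1, 0x2 and 0x4 in
+ * turn and returns chan_id 3, i.e. the 1-based position of the lowest set
+ * bit.
+ */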
+
+irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i, j = map_dma_q_txirq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (j < 0)
+ return IRQ_NONE;
+
+ i = get_mcdma_tx_q(lp, j);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id));
+ if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id), status);
+ axienet_start_xmit_done(lp->ndev, q);
+ goto out;
+ }
+ if (!(status & XMCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->txq_bd_v[q->tx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static inline int map_dma_q_rxirq(int irq, struct axienet_local *lp)
+{
+ int i, chan_sermask;
+ u16 chan_id = 1;
+ struct axienet_dma_q *q = lp->dq[0];
+
+ chan_sermask = axienet_dma_in32(q, XMCDMA_RXINT_SER_OFFSET +
+ q->rx_offset);
+
+ for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
+ i <<= 1, chan_id++) {
+ if (chan_sermask & i)
+ return chan_id;
+ }
+
+ return -ENODEV;
+}
+
+irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i, j = map_dma_q_rxirq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (j < 0)
+ return IRQ_NONE;
+
+ i = get_mcdma_rx_q(lp, j);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ cr &= ~(XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+ napi_schedule(&lp->napi[i]);
+ }
+
+ if (!(status & XMCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->rxq_bd_v[q->rx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
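+/**
+ * axienet_strings - Fill in the strings for the per-queue statistics
+ * @ndev: Pointer to the net_device structure
+ * @sset: Stringset id (only ETH_SS_STATS is handled)
+ * @data: Buffer to be filled with the statistic names
+ *
+ * The string index is derived from each queue's channel id; for example,
+ * two Tx queues on MCDMA channels 1 and 2 yield "txq0_packets",
+ * "txq0_bytes", "txq1_packets" and "txq1_bytes", in that order.
+ */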
+void axienet_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ int i, j, k = 0;
+
+ for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_tx_queues)
+ break;
+ q = lp->dq[j];
+ if (i % 2 == 0)
+ k = (q->chan_id - 1) * 2;
+ if (sset == ETH_SS_STATS)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ axienet_get_tx_strings_stats[k].name,
+ ETH_GSTRING_LEN);
+ ++i;
+ k++;
+ if (i % 2 == 0)
+ ++j;
+ }
+ k = 0;
+ for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
+ AXIENET_RX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_rx_queues)
+ break;
+ q = lp->dq[j];
+ if (i % 2 == 0)
+ k = (q->chan_id - 1) * 2;
+ if (sset == ETH_SS_STATS)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ axienet_get_rx_strings_stats[k].name,
+ ETH_GSTRING_LEN);
+ ++i;
+ k++;
+ if (i % 2 == 0)
+ ++j;
+ }
+}
+
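+/**
+ * axienet_sset_count - Get the number of per-queue statistics strings
+ * @ndev: Pointer to the net_device structure
+ * @sset: Stringset id
+ *
+ * Return: the number of Tx plus Rx statistics for ETH_SS_STATS,
+ * -EOPNOTSUPP for any other stringset.
+ */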
+int axienet_sset_count(struct net_device *ndev, int sset)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (AXIENET_TX_SSTATS_LEN(lp) + AXIENET_RX_SSTATS_LEN(lp));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
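+/**
+ * axienet_get_stats - Fill in the per-queue packet and byte counters
+ * @ndev: Pointer to the net_device structure
+ * @stats: Pointer to the ethtool_stats request structure
+ * @data: Buffer to be filled with the counter values, Tx queues first
+ */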
+void axienet_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ unsigned int i = 0, j;
+
+ for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_tx_queues)
+ break;
+
+ q = lp->dq[j];
+ data[i++] = q->tx_packets;
+ data[i++] = q->tx_bytes;
+ ++j;
+ }
+ for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
+ AXIENET_RX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_rx_queues)
+ break;
+
+ q = lp->dq[j];
+ data[i++] = q->rx_packets;
+ data[i++] = q->rx_bytes;
+ ++j;
+ }
+}
+
+/**
+ * axienet_mcdma_err_handler - Tasklet handler for Axi MCDMA Error
+ * @data: Pointer to the DMA queue structure, passed in as an unsigned long
+ *
+ * Resets the Axi MCDMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+void __maybe_unused axienet_mcdma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i, chan_en;
+ int mdio_mcreg = 0;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
+ struct net_device *ndev = lp->ndev;
+ struct aximcdma_bd *cur_p;
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ axienet_mdio_wait_until_ready(lp);
+ /* Disable the MDIO interface till the Axi Ethernet reset is
+ * completed. An Axi Ethernet reset resets the complete core,
+ * including the MDIO, so if the MDIO is not disabled when the
+ * reset is started, it will be broken afterwards.
+ */
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
+ ~XAE_MDIO_MC_MDIOEN_MASK));
+ }
+
+ __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
+ axienet_mdio_wait_until_ready(lp);
+ }
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ cur_p = &q->txq_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ cur_p->tx_skb = 0;
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ cur_p = &q->rxq_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XMCDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XMCDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XMCDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
+ (RX_BD_NUM - 1)));
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
+ q->tx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ lp->axienet_config->setoptions(ndev, lp->options);
+}
+
+int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
+ struct device_node *np,
+ struct axienet_local *lp)
+{
+ int i;
+ char dma_name[24];
+
+ for_each_tx_dma_queue(lp, i) {
+ struct axienet_dma_q *q;
+
+ q = lp->dq[i];
+
+ q->dma_regs = lp->mcdma_regs;
+ snprintf(dma_name, sizeof(dma_name), "mm2s_ch%d_introut",
+ q->chan_id);
+ q->tx_irq = platform_get_irq_byname(pdev, dma_name);
+ q->eth_hasdre = of_property_read_bool(np,
+ "xlnx,include-dre");
+ spin_lock_init(&q->tx_lock);
+ }
+ of_node_put(np);
+
+ return 0;
+}
+
+int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev)
+{
+ int i;
+ char dma_name[24];
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q;
+
+ q = lp->dq[i];
+
+ q->dma_regs = lp->mcdma_regs;
+ snprintf(dma_name, sizeof(dma_name), "s2mm_ch%d_introut",
+ q->chan_id);
+ q->rx_irq = platform_get_irq_byname(pdev, dma_name);
+
+ spin_lock_init(&q->rx_lock);
+
+ netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+ XAXIENET_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+static ssize_t rxch_obs1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 1 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 2 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 3 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 4 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs5_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 5 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t rxch_obs6_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET + q->rx_offset);
+
+ return sprintf(buf, "Ingress Channel Observer 6 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 1 Contents is 0x%x\n",
+ reg);
+}
+
+static ssize_t txch_obs2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 2 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 3 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 4 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs5_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 5 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t txch_obs6_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ u32 reg;
+
+ reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET);
+
+ return sprintf(buf, "Egress Channel Observer 6 Contents is 0x%x\n\r",
+ reg);
+}
+
+static ssize_t chan_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return sprintf(buf, "chan_id is %d and weight is %d\n",
+ lp->chan_id, lp->weight);
+}
+
+static ssize_t chan_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q = lp->dq[0];
+ int ret;
+ u16 flags, chan_id;
+ u32 val;
+
+ ret = kstrtou16(buf, 16, &flags);
+ if (ret)
+ return ret;
+
+ lp->chan_id = (flags & 0xF0) >> 4;
+ lp->weight = flags & 0x0F;
+
+ if (lp->chan_id < 8)
+ val = axienet_dma_in32(q, XMCDMA_TXWEIGHT0_OFFSET);
+ else
+ val = axienet_dma_in32(q, XMCDMA_TXWEIGHT1_OFFSET);
+
+ if (lp->chan_id > 7)
+ chan_id = lp->chan_id - 8;
+ else
+ chan_id = lp->chan_id;
+
+ val &= ~XMCDMA_TXWEIGHT_CH_MASK(chan_id);
+ val |= lp->weight << XMCDMA_TXWEIGHT_CH_SHIFT(chan_id);
+
+ if (lp->chan_id < 8)
+ axienet_dma_out32(q, XMCDMA_TXWEIGHT0_OFFSET, val);
+ else
+ axienet_dma_out32(q, XMCDMA_TXWEIGHT1_OFFSET, val);
+
+ return count;
+}
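+
+/* Usage sketch (the sysfs path and value are assumptions for illustration):
+ * the store routine parses a hex byte whose high nibble is the channel id
+ * and whose low nibble is the weight, so
+ *   echo 35 > /sys/class/net/<if>/device/chan_weight
+ * would program channel 3 with weight 5.
+ */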
+
+static DEVICE_ATTR_RW(chan_weight);
+static DEVICE_ATTR_RO(rxch_obs1);
+static DEVICE_ATTR_RO(rxch_obs2);
+static DEVICE_ATTR_RO(rxch_obs3);
+static DEVICE_ATTR_RO(rxch_obs4);
+static DEVICE_ATTR_RO(rxch_obs5);
+static DEVICE_ATTR_RO(rxch_obs6);
+static DEVICE_ATTR_RO(txch_obs1);
+static DEVICE_ATTR_RO(txch_obs2);
+static DEVICE_ATTR_RO(txch_obs3);
+static DEVICE_ATTR_RO(txch_obs4);
+static DEVICE_ATTR_RO(txch_obs5);
+static DEVICE_ATTR_RO(txch_obs6);
+static const struct attribute *mcdma_attrs[] = {
+ &dev_attr_chan_weight.attr,
+ &dev_attr_rxch_obs1.attr,
+ &dev_attr_rxch_obs2.attr,
+ &dev_attr_rxch_obs3.attr,
+ &dev_attr_rxch_obs4.attr,
+ &dev_attr_rxch_obs5.attr,
+ &dev_attr_rxch_obs6.attr,
+ &dev_attr_txch_obs1.attr,
+ &dev_attr_txch_obs2.attr,
+ &dev_attr_txch_obs3.attr,
+ &dev_attr_txch_obs4.attr,
+ &dev_attr_txch_obs5.attr,
+ &dev_attr_txch_obs6.attr,
+ NULL,
+};
+
+static const struct attribute_group mcdma_attributes = {
+ .attrs = (struct attribute **)mcdma_attrs,
+};
+
+int axeinet_mcdma_create_sysfs(struct kobject *kobj)
+{
+ return sysfs_create_group(kobj, &mcdma_attributes);
+}
+
+void axeinet_mcdma_remove_sysfs(struct kobject *kobj)
+{
+ sysfs_remove_group(kobj, &mcdma_attributes);
+}
+
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 704babdbc8a2..ec9a82e09f7e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -96,7 +96,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
if (ret < 0)
return ret;
- axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
XAE_MDIO_MCR_PHYAD_MASK) |
@@ -126,9 +126,12 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
{
int ret;
u32 clk_div, host_clock;
+
struct mii_bus *bus;
struct resource res;
struct device_node *np1;
+ /* the ethernet controller device node */
+ struct device_node *npp = NULL;
/* clk_div can be calculated by deriving it from the equation:
* fMDIO = fHOST / ((1 + clk_div) * 2)
@@ -154,42 +157,45 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
* fHOST can be read from the flattened device tree as property
* "clock-frequency" from the CPU
*/
-
- np1 = of_find_node_by_name(NULL, "cpu");
- if (!np1) {
- netdev_warn(lp->ndev, "Could not find CPU device node.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- goto issue;
+ np1 = of_get_parent(lp->phy_node);
+ if (np1) {
+ npp = of_get_parent(np1);
+ of_node_put(np1);
}
- if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
- netdev_warn(lp->ndev, "clock-frequency property not found.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
+ if (!npp) {
+ dev_warn(lp->dev,
+ "Could not find ethernet controller device node.");
+ dev_warn(lp->dev, "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
- of_node_put(np1);
- goto issue;
+ } else {
+ if (of_property_read_u32(npp, "clock-frequency", &host_clock)) {
+ netdev_warn(lp->ndev,
+ "clock-frequency property not found.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
+ clk_div = DEFAULT_CLOCK_DIVISOR;
+ } else {
+ clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+
+ /* If there is any remainder from the division of
+ * fHOST / (MAX_MDIO_FREQ * 2), then we need to add 1
+ * to the clock divisor or we will surely be
+ * above 2.5 MHz
+ */
+ if (host_clock % (MAX_MDIO_FREQ * 2))
+ clk_div++;
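+ /* Worked numeric example (illustrative): a 100 MHz
+ * host clock gives clk_div =
+ * (100000000 / 5000000) - 1 = 19, so
+ * fMDIO = 100 MHz / ((1 + 19) * 2) = 2.5 MHz,
+ * right at the MDIO limit.
+ */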
+ dev_dbg(lp->dev, "Setting MDIO clock divisor to %u ",
+ clk_div);
+ dev_dbg(lp->dev, "based on %u Hz host clock.\n",
+ host_clock);
+ }
+ of_node_put(npp);
}
- clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
- /* If there is any remainder from the division of
- * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz
- */
- if (host_clock % (MAX_MDIO_FREQ * 2))
- clk_div++;
-
- netdev_dbg(lp->ndev,
- "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
- clk_div, host_clock);
-
- of_node_put(np1);
-issue:
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+ (((u32)clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
ret = axienet_mdio_wait_until_ready(lp);
if (ret < 0)
@@ -199,10 +205,9 @@ issue:
if (!bus)
return -ENOMEM;
- np1 = of_get_parent(lp->phy_node);
- of_address_to_resource(np1, 0, &res);
+ of_address_to_resource(npp, 0, &res);
snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long) res.start);
+ (unsigned long long)res.start);
bus->priv = lp;
bus->name = "Xilinx Axi Ethernet MDIO";
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0de52e70abcc..134da2d726a7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -91,13 +91,11 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((ulong)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -124,7 +122,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +130,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* lock used for synchronization */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +141,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -166,10 +162,12 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
- xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(XEL_RSR_RECV_IE_MASK,
+ drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
- xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK,
+ drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@@ -184,7 +182,8 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Disable the Global Interrupt Enable */
- xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK,
+ drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
@@ -207,7 +206,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u32 align_buffer;
u32 *to_u32_ptr;
@@ -264,7 +263,7 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -329,7 +328,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -338,15 +336,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* if it is configured in HW
*/
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((ulong __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -370,7 +369,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
- * @maxlen: Maximum supported ethernet packet length
+ * @maxlen: Maximum supported ethernet packet length
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
@@ -399,7 +398,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* will correct on subsequent calls
*/
if (drvdata->rx_ping_pong != 0)
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((ulong __force)addr ^
XEL_BUFFER_OFFSET);
else
return 0; /* No data was available */
@@ -421,7 +420,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -431,23 +429,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -483,7 +483,8 @@ static void xemaclite_update_address(struct net_local *drvdata,
/* Update the MAC address in the EmacLite */
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
- xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR,
+ addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
@@ -668,8 +669,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -677,10 +677,10 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
}
/* Check if the Transmission for the second buffer is completed */
- tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr +
+ XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -776,7 +776,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
*
- * Return: 0 upon success or a negative error upon failure
+ * Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
@@ -837,6 +837,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
of_address_to_resource(npp, 0, &res);
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
@@ -1191,9 +1192,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
+ "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
(unsigned int __force)ndev->mem_start,
- (unsigned int __force)lp->base_addr, ndev->irq);
+ (unsigned long __force)lp->base_addr, ndev->irq);
return 0;
error:
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c b/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c
new file mode 100644
index 000000000000..044c285365b3
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c
@@ -0,0 +1,177 @@
+/*
+ * Xilinx FPGA Xilinx TSN QCI Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "xilinx_tsn_switch.h"
+
+#define IN_PORTID_MASK 0x3
+#define IN_PORTID_SHIFT 24
+#define MAX_SEQID_MASK 0x0000FFFF
+
+#define SEQ_REC_HIST_LEN_MASK 0x000000FF
+#define SEQ_REC_HIST_LEN_SHIFT 16
+#define SPLIT_STREAM_INPORTID_SHIFT 12
+#define SPLIT_STREAM_INPORTID_MASK 0x3
+#define SPLIT_STREAM_VLANID_MASK 0x00000FFF
+
+#define GATE_ID_SHIFT 24
+#define MEMBER_ID_SHIFT 8
+#define SEQ_RESET_SHIFT 7
+#define REC_TIMEOUT_SHIFT 6
+#define GATE_STATE_SHIFT 5
+#define FRER_VALID_SHIFT 4
+#define WR_OP_TYPE_SHIFT 2
+#define OP_TYPE_SHIFT 1
+#define WR_OP_TYPE_MASK 0x3
+#define FRER_EN_CONTROL_MASK 0x1
+
+/**
+ * frer_control - Configure the control register for FRER
+ * @data: Value to be programmed
+ */
+void frer_control(struct frer_ctrl data)
+{
+ u32 mask = 0;
+
+ mask = data.gate_id << GATE_ID_SHIFT;
+ mask |= data.memb_id << MEMBER_ID_SHIFT;
+ mask |= data.seq_reset << SEQ_RESET_SHIFT;
+ mask |= data.gate_state << GATE_STATE_SHIFT;
+ mask |= data.rcvry_tmout << REC_TIMEOUT_SHIFT;
+ mask |= data.frer_valid << FRER_VALID_SHIFT;
+ mask |= (data.wr_op_type & WR_OP_TYPE_MASK) << WR_OP_TYPE_SHIFT;
+ mask |= data.op_type << OP_TYPE_SHIFT;
+ mask |= FRER_EN_CONTROL_MASK;
+
+ axienet_iow(&lp, FRER_CONTROL_OFFSET, mask);
+
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, FRER_CONTROL_OFFSET) & FRER_EN_CONTROL_MASK))
+ ;
+}
+
+/**
+ * get_ingress_filter_config - Get Ingress Filter Configuration
+ * @data: Value returned
+ */
+void get_ingress_filter_config(struct in_fltr *data)
+{
+ u32 reg_val = 0;
+
+ reg_val = axienet_ior(&lp, INGRESS_FILTER_OFFSET);
+
+ data->max_seq_id = reg_val & MAX_SEQID_MASK;
+ data->in_port_id = (reg_val >> IN_PORTID_SHIFT) & IN_PORTID_MASK;
+}
+
+/**
+ * config_ingress_filter - Program the ingress filter configuration
+ * @data: Value to be programmed
+ */
+void config_ingress_filter(struct in_fltr data)
+{
+ u32 mask = 0;
+
+ mask = ((data.in_port_id & IN_PORTID_MASK) << IN_PORTID_SHIFT) |
+ (data.max_seq_id & MAX_SEQID_MASK);
+ axienet_iow(&lp, INGRESS_FILTER_OFFSET, mask);
+}
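+
+/* Packing example (values are illustrative): in_port_id = 1 and
+ * max_seq_id = 100 yield mask = (1 << 24) | 100 = 0x01000064, which is
+ * the word written to INGRESS_FILTER_OFFSET above.
+ */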
+
+/**
+ * get_member_reg - Read frer member Configuration registers value
+ * @data: Value returned
+ */
+void get_member_reg(struct frer_memb_config *data)
+{
+ u32 conf_r1 = 0;
+
+ conf_r1 = axienet_ior(&lp, FRER_CONFIG_REG1);
+ data->rem_ticks = axienet_ior(&lp, FRER_CONFIG_REG2);
+
+ data->seq_rec_hist_len = (conf_r1 >> SEQ_REC_HIST_LEN_SHIFT)
+ & SEQ_REC_HIST_LEN_MASK;
+ data->split_strm_egport_id = (conf_r1 >> SPLIT_STREAM_INPORTID_SHIFT)
+ & SPLIT_STREAM_INPORTID_MASK;
+ data->split_strm_vlan_id = conf_r1 & SPLIT_STREAM_VLANID_MASK;
+}
+
+/**
+ * program_member_reg - configure frer member Configuration registers
+ * @data: Value to be programmed
+ */
+void program_member_reg(struct frer_memb_config data)
+{
+ u32 conf_r1 = 0;
+
+ conf_r1 = (data.seq_rec_hist_len & SEQ_REC_HIST_LEN_MASK)
+ << SEQ_REC_HIST_LEN_SHIFT;
+ conf_r1 = conf_r1 | ((data.split_strm_egport_id
+ & SPLIT_STREAM_INPORTID_MASK)
+ << SPLIT_STREAM_INPORTID_SHIFT);
+ conf_r1 = conf_r1 | (data.split_strm_vlan_id
+ & SPLIT_STREAM_VLANID_MASK);
+
+ axienet_iow(&lp, FRER_CONFIG_REG1, conf_r1);
+ axienet_iow(&lp, FRER_CONFIG_REG2, data.rem_ticks);
+}
+
+/**
+ * get_frer_static_counter - get frer static counters value
+ * @data: return value, containing counter value
+ */
+void get_frer_static_counter(struct frer_static_counter *data)
+{
+ int offset = (data->num) * 8;
+
+ data->frer_fr_count.lsb = axienet_ior(&lp, TOTAL_FRER_FRAMES_OFFSET +
+ offset);
+ data->frer_fr_count.msb = axienet_ior(&lp, TOTAL_FRER_FRAMES_OFFSET +
+ offset + 0x4);
+
+ data->disc_frames_in_portid.lsb = axienet_ior(&lp,
+ FRER_DISCARD_INGS_FLTR_OFFSET + offset);
+ data->disc_frames_in_portid.msb = axienet_ior(&lp,
+ FRER_DISCARD_INGS_FLTR_OFFSET + offset + 0x4);
+
+ data->pass_frames_ind_recv.lsb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_INDV_OFFSET + offset);
+ data->pass_frames_ind_recv.msb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_INDV_OFFSET + offset + 0x4);
+
+ data->disc_frames_ind_recv.lsb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_INDV_OFFSET + offset);
+ data->disc_frames_ind_recv.msb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_INDV_OFFSET + offset + 0x4);
+
+ data->pass_frames_seq_recv.lsb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_SEQ_OFFSET + offset);
+ data->pass_frames_seq_recv.msb = axienet_ior(&lp,
+ FRER_PASS_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+ data->disc_frames_seq_recv.lsb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_SEQ_OFFSET + offset);
+ data->disc_frames_seq_recv.msb = axienet_ior(&lp,
+ FRER_DISCARD_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+ data->rogue_frames_seq_recv.lsb = axienet_ior(&lp,
+ FRER_ROGUE_FRAMES_SEQ_OFFSET + offset);
+ data->rogue_frames_seq_recv.msb = axienet_ior(&lp,
+ FRER_ROGUE_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+ data->seq_recv_rst.lsb = axienet_ior(&lp,
+ SEQ_RECV_RESETS_OFFSET + offset);
+ data->seq_recv_rst.msb = axienet_ior(&lp,
+ SEQ_RECV_RESETS_OFFSET + offset + 0x4);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c
new file mode 100644
index 000000000000..bcd6c737bc27
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c
@@ -0,0 +1,161 @@
+/*
+ * Xilinx FPGA Xilinx TSN End point driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/skbuff.h>
+
+#include "xilinx_axienet.h"
+
+/**
+ * tsn_ep_ioctl - TSN endpoint ioctl interface.
+ * @dev: Pointer to the net_device structure
+ * @rq: Socket ioctl interface request structure
+ * @cmd: Ioctl case
+ *
+ * Return: 0 on success, Non-zero error value on failure.
+ *
+ * This is the ioctl interface for TSN end point. Currently this
+ * supports only gate programming.
+ */
+static int tsn_ep_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ switch (cmd) {
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOCCHIOCTL:
+ return axienet_set_schedule(dev, rq->ifr_data);
+ case SIOC_GET_SCHED:
+ return axienet_get_schedule(dev, rq->ifr_data);
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * tsn_ep_xmit - TSN endpoint xmit routine.
+ * @skb: Packet data
+ * @dev: Pointer to the net_device structure
+ *
+ * Return: Always returns NETDEV_TX_OK.
+ *
+ * This is a dummy xmit function for the endpoint, as the whole data path
+ * is assumed to be connected through TEMAC1 from the Linux point of view.
+ */
+static int tsn_ep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ep_netdev_ops = {
+ .ndo_do_ioctl = tsn_ep_ioctl,
+ .ndo_start_xmit = tsn_ep_xmit,
+};
+
+static const struct of_device_id tsn_ep_of_match[] = {
+ { .compatible = "xlnx,tsn-ep"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tsn_ep_of_match);
+
+/**
+ * tsn_ep_probe - TSN endpoint probe function.
+ * @pdev: Pointer to platform device structure.
+ *
+ * Return: 0, on success
+ * Non-zero error value on failure.
+ *
+ * This is the probe routine for TSN endpoint driver.
+ */
+static int tsn_ep_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct axienet_local *lp;
+ struct net_device *ndev;
+ struct resource *ethres;
+ u16 num_tc = 0;
+
+ ndev = alloc_netdev(0, "ep", NET_NAME_UNKNOWN, ether_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &ep_netdev_ops;
+
+ lp = netdev_priv(ndev);
+ lp->ndev = ndev;
+ lp->dev = &pdev->dev;
+ lp->options = XAE_OPTION_DEFAULTS;
+
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &num_tc);
+ if (ret || (num_tc != 2 && num_tc != 3))
+ lp->num_tc = XAE_MAX_TSN_TC;
+ else
+ lp->num_tc = num_tc;
+ /* Map device registers */
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+ if (IS_ERR(lp->regs)) {
+ ret = PTR_ERR(lp->regs);
+ goto free_netdev;
+ }
+
+ ret = register_netdev(lp->ndev);
+ if (ret) {
+ dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
+ goto free_netdev;
+ }
+
+ return 0;
+
+free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int tsn_ep_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_netdev(ndev);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct platform_driver tsn_ep_driver = {
+ .probe = tsn_ep_probe,
+ .remove = tsn_ep_remove,
+ .driver = {
+ .name = "tsn_ep_axienet",
+ .of_match_table = tsn_ep_of_match,
+ },
+};
+
+module_platform_driver(tsn_ep_driver);
+
+MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c
new file mode 100644
index 000000000000..f48c2e0cb69e
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c
@@ -0,0 +1,223 @@
+/*
+ * Xilinx FPGA Xilinx TSN QBU/QBR - Frame Preemption module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Priyadarshini Babu <priyadar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_preemption.h"
+
+/**
+ * axienet_preemption - Configure Frame Preemption
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u8 preemp;
+
+ if (copy_from_user(&preemp, useraddr, sizeof(preemp)))
+ return -EFAULT;
+
+ axienet_iow(lp, PREEMPTION_ENABLE_REG, preemp & PREEMPTION_ENABLE);
+ return 0;
+}
+
+/**
+ * axienet_preemption_ctrl - Configure Frame Preemption Control register
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_ctrl(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct preempt_ctrl_sts data;
+ u32 value;
+
+ if (copy_from_user(&data, useraddr, sizeof(struct preempt_ctrl_sts)))
+ return -EFAULT;
+ value = axienet_ior(lp, PREEMPTION_CTRL_STS_REG);
+
+ value &= ~(VERIFY_TIMER_VALUE_MASK << VERIFY_TIMER_VALUE_SHIFT);
+ value |= (data.verify_timer_value << VERIFY_TIMER_VALUE_SHIFT);
+ value &= ~(ADDITIONAL_FRAG_SIZE_MASK << ADDITIONAL_FRAG_SIZE_SHIFT);
+ value |= (data.additional_frag_size << ADDITIONAL_FRAG_SIZE_SHIFT);
+ value &= ~(DISABLE_PREEMPTION_VERIFY);
+ value |= (data.disable_preemp_verify);
+
+ axienet_iow(lp, PREEMPTION_CTRL_STS_REG, value);
+ return 0;
+}
+
+/**
+ * axienet_preemption_sts - Get Frame Preemption Status
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing Frame Preemption status
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_sts(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct preempt_ctrl_sts status;
+ u32 value;
+
+ value = axienet_ior(lp, PREEMPTION_CTRL_STS_REG);
+
+ status.tx_preemp_sts = (value & TX_PREEMPTION_STS) ? 1 : 0;
+ status.mac_tx_verify_sts = (value >> MAC_MERGE_TX_VERIFY_STS_SHIFT) &
+ MAC_MERGE_TX_VERIFY_STS_MASK;
+ status.verify_timer_value = (value >> VERIFY_TIMER_VALUE_SHIFT) &
+ VERIFY_TIMER_VALUE_MASK;
+ status.additional_frag_size = (value >> ADDITIONAL_FRAG_SIZE_SHIFT) &
+ ADDITIONAL_FRAG_SIZE_MASK;
+ status.disable_preemp_verify = value & DISABLE_PREEMPTION_VERIFY;
+
+ if (copy_to_user(useraddr, &status, sizeof(struct preempt_ctrl_sts)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * statistic_cnts - Read statistics counter registers
+ * @ndev: Pointer to the net_device structure
+ * @ptr: Buffer addr to fill the counter values
+ * @count: Number of 32-bit registers to read
+ * @addr_off: Offset of the first register to read
+ */
+static void statistic_cnts(struct net_device *ndev, void *ptr,
+ unsigned int count, unsigned int addr_off)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int *buf = (int *)ptr;
+ int i = 0;
+
+ for (i = 0; i < count; i++) {
+ buf[i] = axienet_ior(lp, addr_off);
+ addr_off += 4;
+ }
+}
+
+/**
+ * axienet_preemption_cnt - Get Frame Preemption Statistics counter
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing counters value
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_cnt(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct emac_pmac_stats stats;
+
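+	/* each counter is 64 bits wide (two 32-bit registers), hence
+	 * the sizeof()/4 register-read counts below
+	 */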
+ statistic_cnts(ndev, &stats.emac,
+ sizeof(struct statistics_counters) / 4,
+ RX_BYTES_EMAC_REG);
+
+ stats.preemp_en = axienet_ior(lp, PREEMPTION_ENABLE_REG);
+ if (stats.preemp_en) {
+ statistic_cnts(ndev, &stats.pmac.sts,
+ sizeof(struct statistics_counters) / 4,
+ RX_BYTES_PMAC_REG);
+ statistic_cnts(ndev, &stats.pmac.merge,
+ sizeof(struct mac_merge_counters) / 4,
+ TX_HOLD_REG);
+ }
+
+ if (copy_to_user(useraddr, &stats, sizeof(struct emac_pmac_stats)))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * axienet_qbu_user_override - Configure QBU user override register
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_qbu_user_override(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct qbu_user data;
+ u32 value;
+
+ if (copy_from_user(&data, useraddr, sizeof(struct qbu_user)))
+ return -EFAULT;
+
+ value = axienet_ior(lp, QBU_USER_OVERRIDE_REG);
+
+ if (data.set & QBU_WINDOW) {
+ if (data.user.hold_rel_window) {
+ value |= USER_HOLD_REL_ENABLE_VALUE;
+ value |= HOLD_REL_WINDOW_OVERRIDE;
+ } else {
+ value &= ~(USER_HOLD_REL_ENABLE_VALUE);
+ value &= ~(HOLD_REL_WINDOW_OVERRIDE);
+ }
+ }
+ if (data.set & QBU_GUARD_BAND) {
+ if (data.user.guard_band)
+ value |= GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE;
+ else
+ value &= ~(GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE);
+ }
+ if (data.set & QBU_HOLD_TIME) {
+ if (data.user.hold_time_override) {
+ value |= HOLD_TIME_OVERRIDE;
+ value &= ~(USER_HOLD_TIME_MASK << USER_HOLD_TIME_SHIFT);
+ value |= data.user.user_hold_time <<
+ USER_HOLD_TIME_SHIFT;
+ } else {
+ value &= ~(HOLD_TIME_OVERRIDE);
+ value &= ~(USER_HOLD_TIME_MASK << USER_HOLD_TIME_SHIFT);
+ }
+ }
+ if (data.set & QBU_REL_TIME) {
+ if (data.user.rel_time_override) {
+ value |= REL_TIME_OVERRIDE;
+ value &= ~(USER_REL_TIME_MASK << USER_REL_TIME_SHIFT);
+ value |= data.user.user_rel_time << USER_REL_TIME_SHIFT;
+ } else {
+ value &= ~(REL_TIME_OVERRIDE);
+ value &= ~(USER_REL_TIME_MASK << USER_REL_TIME_SHIFT);
+ }
+ }
+
+ axienet_iow(lp, QBU_USER_OVERRIDE_REG, value);
+ return 0;
+}
+
+/**
+ * axienet_qbu_sts - Get QBU Core status
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing QBU core status value
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_qbu_sts(struct net_device *ndev, void __user *useraddr)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct qbu_core_status status;
+ u32 value = 0;
+
+ value = axienet_ior(lp, QBU_CORE_STS_REG);
+ status.hold_time = (value >> HOLD_TIME_STS_SHIFT) & HOLD_TIME_STS_MASK;
+ status.rel_time = (value >> REL_TIME_STS_SHIFT) & REL_TIME_STS_MASK;
+ status.hold_rel_en = (value & HOLD_REL_ENABLE_STS) ? 1 : 0;
+ status.pmac_hold_req = value & PMAC_HOLD_REQ_STS;
+
+ if (copy_to_user(useraddr, &status, sizeof(struct qbu_core_status)))
+ return -EFAULT;
+ return 0;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h
new file mode 100644
index 000000000000..d8655513664d
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h
@@ -0,0 +1,159 @@
+/*
+ * Xilinx TSN QBU/QBR - Frame Preemption header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Priyadarshini Babu <priyadar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_PREEMPTION_H
+#define XILINX_TSN_PREEMPTION_H
+
+#define PREEMPTION_ENABLE_REG 0x00000440
+#define PREEMPTION_CTRL_STS_REG 0x00000444
+#define QBU_USER_OVERRIDE_REG 0x00000448
+#define QBU_CORE_STS_REG 0x0000044c
+#define TX_HOLD_REG 0x00000910
+#define RX_BYTES_EMAC_REG 0x00000200
+#define RX_BYTES_PMAC_REG 0x00000800
+
+#define PREEMPTION_ENABLE BIT(0)
+
+#define TX_PREEMPTION_STS BIT(31)
+#define MAC_MERGE_TX_VERIFY_STS_MASK 0x7
+#define MAC_MERGE_TX_VERIFY_STS_SHIFT 24
+#define VERIFY_TIMER_VALUE_MASK 0x7F
+#define VERIFY_TIMER_VALUE_SHIFT 8
+#define ADDITIONAL_FRAG_SIZE_MASK 0x3
+#define ADDITIONAL_FRAG_SIZE_SHIFT 4
+#define DISABLE_PREEMPTION_VERIFY BIT(0)
+
+#define USER_HOLD_REL_ENABLE_VALUE BIT(31)
+#define USER_HOLD_TIME_MASK 0x1FF
+#define USER_HOLD_TIME_SHIFT 16
+#define USER_REL_TIME_MASK 0x3F
+#define USER_REL_TIME_SHIFT 8
+#define GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE BIT(3)
+#define HOLD_REL_WINDOW_OVERRIDE BIT(2)
+#define HOLD_TIME_OVERRIDE BIT(1)
+#define REL_TIME_OVERRIDE BIT(0)
+
+#define HOLD_REL_ENABLE_STS BIT(31)
+#define HOLD_TIME_STS_MASK 0x1FF
+#define HOLD_TIME_STS_SHIFT 16
+#define REL_TIME_STS_MASK 0x3F
+#define REL_TIME_STS_SHIFT 8
+#define PMAC_HOLD_REQ_STS BIT(0)
+
+struct preempt_ctrl_sts {
+ u8 tx_preemp_sts:1;
+ u8 mac_tx_verify_sts:3;
+ u8 verify_timer_value:7;
+ u8 additional_frag_size:2;
+ u8 disable_preemp_verify:1;
+} __packed;
+
+struct qbu_user_override {
+ u8 enable_value:1;
+ u16 user_hold_time:9;
+ u8 user_rel_time:6;
+ u8 guard_band:1;
+ u8 hold_rel_window:1;
+ u8 hold_time_override:1;
+ u8 rel_time_override:1;
+} __packed;
+
+struct qbu_user {
+ struct qbu_user_override user;
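+	/* bitmap of QBU_WINDOW/QBU_GUARD_BAND/QBU_HOLD_TIME/QBU_REL_TIME
+	 * selecting which of the above fields to program
+	 */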
+ u8 set;
+};
+
+#define QBU_WINDOW BIT(0)
+#define QBU_GUARD_BAND BIT(1)
+#define QBU_HOLD_TIME BIT(2)
+#define QBU_REL_TIME BIT(3)
+
+struct qbu_core_status {
+ u16 hold_time;
+ u8 rel_time;
+ u8 hold_rel_en:1;
+ u8 pmac_hold_req:1;
+} __packed;
+
+struct cnt_64 {
+ unsigned int msb;
+ unsigned int lsb;
+};
+
+union static_cntr {
+ u64 cnt;
+ struct cnt_64 word;
+};
+
+struct mac_merge_counters {
+ union static_cntr tx_hold_cnt;
+ union static_cntr tx_frag_cnt;
+ union static_cntr rx_assembly_ok_cnt;
+ union static_cntr rx_assembly_err_cnt;
+ union static_cntr rx_smd_err_cnt;
+ union static_cntr rx_frag_cnt;
+};
+
+struct statistics_counters {
+ union static_cntr rx_bytes_cnt;
+ union static_cntr tx_bytes_cnt;
+ union static_cntr undersize_frames_cnt;
+ union static_cntr frag_frames_cnt;
+ union static_cntr rx_64_bytes_frames_cnt;
+ union static_cntr rx_65_127_bytes_frames_cnt;
+ union static_cntr rx_128_255_bytes_frames_cnt;
+ union static_cntr rx_256_511_bytes_frames_cnt;
+ union static_cntr rx_512_1023_bytes_frames_cnt;
+ union static_cntr rx_1024_max_frames_cnt;
+ union static_cntr rx_oversize_frames_cnt;
+ union static_cntr tx_64_bytes_frames_cnt;
+ union static_cntr tx_65_127_bytes_frames_cnt;
+ union static_cntr tx_128_255_bytes_frames_cnt;
+ union static_cntr tx_256_511_bytes_frames_cnt;
+ union static_cntr tx_512_1023_bytes_frames_cnt;
+ union static_cntr tx_1024_max_frames_cnt;
+ union static_cntr tx_oversize_frames_cnt;
+ union static_cntr rx_good_frames_cnt;
+ union static_cntr rx_fcs_err_cnt;
+ union static_cntr rx_good_broadcast_frames_cnt;
+ union static_cntr rx_good_multicast_frames_cnt;
+ union static_cntr rx_good_control_frames_cnt;
+ union static_cntr rx_out_of_range_err_cnt;
+ union static_cntr rx_good_vlan_frames_cnt;
+ union static_cntr rx_good_pause_frames_cnt;
+ union static_cntr rx_bad_opcode_frames_cnt;
+ union static_cntr tx_good_frames_cnt;
+ union static_cntr tx_good_broadcast_frames_cnt;
+ union static_cntr tx_good_multicast_frames_cnt;
+ union static_cntr tx_underrun_err_cnt;
+ union static_cntr tx_good_control_frames_cnt;
+ union static_cntr tx_good_vlan_frames_cnt;
+ union static_cntr tx_good_pause_frames_cnt;
+};
+
+struct pmac_counters {
+ struct statistics_counters sts;
+ struct mac_merge_counters merge;
+};
+
+struct emac_pmac_stats {
+ u8 preemp_en;
+ struct statistics_counters emac;
+ struct pmac_counters pmac;
+};
+
+#endif /* XILINX_TSN_PREEMPTION_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h
new file mode 100644
index 000000000000..d81b0acf12f0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h
@@ -0,0 +1,88 @@
+/*
+ * Xilinx TSN PTP header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TSN_PTP_H_
+#define _TSN_PTP_H_
+
+#define PTP_HW_TSTAMP_SIZE 8 /* 64 bit timestamp */
+#define PTP_RX_HWBUF_SIZE 256
+#define PTP_RX_FRAME_SIZE 252
+#define PTP_HW_TSTAMP_OFFSET (PTP_RX_HWBUF_SIZE - PTP_HW_TSTAMP_SIZE)
+
+#define PTP_MSG_TYPE_MASK BIT(3)
+#define PTP_TYPE_SYNC 0x0
+#define PTP_TYPE_FOLLOW_UP 0x8
+#define PTP_TYPE_PDELAYREQ 0x2
+#define PTP_TYPE_PDELAYRESP 0x3
+#define PTP_TYPE_PDELAYRESP_FOLLOW_UP 0xA
+#define PTP_TYPE_ANNOUNCE 0xB
+#define PTP_TYPE_SIGNALING 0xC
+
+#define PTP_TX_CONTROL_OFFSET 0x00012000 /**< Tx PTP Control Reg */
+#define PTP_RX_CONTROL_OFFSET 0x00012004 /**< Rx PTP Control Reg */
+#define RX_FILTER_CONTROL 0x00012008 /**< Rx Filter Ctrl Reg */
+
+#define PTP_RX_BASE_OFFSET 0x00010000
+#define PTP_RX_PACKET_FIELD_MASK 0x00000F00
+#define PTP_RX_PACKET_CLEAR 0x00000001
+
+#define PTP_TX_BUFFER_OFFSET(index) (0x00011000 + (index) * 0x100)
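+/* indices 0..7 map to the per-message buffers at 0x11000..0x11700 below */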
+
+#define PTP_TX_CMD_FIELD_LEN 8
+#define PTP_TX_CMD_1STEP_SHIFT BIT(16)
+#define PTP_TX_BUFFER_CMD2_FIELD 0x4
+
+#define PTP_TX_SYNC_OFFSET 0x00011000
+#define PTP_TX_FOLLOW_UP_OFFSET 0x00011100
+#define PTP_TX_PDELAYREQ_OFFSET 0x00011200
+#define PTP_TX_PDELAYRESP_OFFSET 0x00011300
+#define PTP_TX_PDELAYRESP_FOLLOW_UP_OFFSET 0x00011400
+#define PTP_TX_ANNOUNCE_OFFSET 0x00011500
+#define PTP_TX_SIGNALING_OFFSET 0x00011600
+#define PTP_TX_GENERIC_OFFSET 0x00011700
+#define PTP_TX_SEND_SYNC_FRAME_MASK 0x00000001
+#define PTP_TX_SEND_FOLLOWUP_FRAME_MASK 0x00000002
+#define PTP_TX_SEND_PDELAYREQ_FRAME_MASK 0x00000004
+#define PTP_TX_SEND_PDELAYRESP_FRAME_MASK 0x00000008
+#define PTP_TX_SEND_PDELAYRESPFOLLOWUP_FRAME_MASK 0x00000010
+#define PTP_TX_SEND_ANNOUNCE_FRAME_MASK 0x00000020
+#define PTP_TX_SEND_FRAME6_BIT_MASK 0x00000040
+#define PTP_TX_SEND_FRAME7_BIT_MASK 0x00000080
+#define PTP_TX_FRAME_WAITING_MASK 0x0000ff00
+#define PTP_TX_FRAME_WAITING_SHIFT 8
+#define PTP_TX_WAIT_SYNC_FRAME_MASK 0x00000100
+#define PTP_TX_WAIT_FOLLOWUP_FRAME_MASK 0x00000200
+#define PTP_TX_WAIT_PDELAYREQ_FRAME_MASK 0x00000400
+#define PTP_TX_WAIT_PDELAYRESP_FRAME_MASK 0x00000800
+#define PTP_TX_WAIT_PDELAYRESPFOLLOWUP_FRAME_MASK 0x00001000
+#define PTP_TX_WAIT_ANNOUNCE_FRAME_MASK 0x00002000
+#define PTP_TX_WAIT_FRAME6_BIT_MASK 0x00004000
+#define PTP_TX_WAIT_FRAME7_BIT_MASK 0x00008000
+#define PTP_TX_WAIT_ALL_FRAMES_MASK 0x0000FF00
+#define PTP_TX_PACKET_FIELD_MASK 0x00070000
+#define PTP_TX_PACKET_FIELD_SHIFT 16
+/* 1-step Correction Field offset 802.1 ASrev */
+#define PTP_CRCT_FIELD_OFFSET 22
+/* 1-step Time Of Day offset 1588-2008 */
+#define PTP_TOD_FIELD_OFFSET 48
+
+int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev);
+irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev);
+irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev);
+
+#endif
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c
new file mode 100644
index 000000000000..05c906019694
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c
@@ -0,0 +1,325 @@
+/*
+ * Xilinx FPGA Xilinx TSN PTP protocol clock Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include "xilinx_tsn_timer.h"
+
+struct xlnx_ptp_timer {
+ struct device *dev;
+ void __iomem *baseaddr;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ spinlock_t reg_lock; /* ptp timer lock */
+ int irq;
+ int pps_enable;
+ int countpulse;
+};
+
+static void xlnx_tod_read(struct xlnx_ptp_timer *timer, struct timespec64 *ts)
+{
+ u32 sec, nsec;
+
+ nsec = in_be32(timer->baseaddr + XTIMER1588_CURRENT_RTC_NS);
+ sec = in_be32(timer->baseaddr + XTIMER1588_CURRENT_RTC_SEC_L);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+static void xlnx_rtc_offset_write(struct xlnx_ptp_timer *timer,
+ const struct timespec64 *ts)
+{
+	pr_debug("%s: sec: %lld nsec: %ld\n", __func__,
+		 (s64)ts->tv_sec, ts->tv_nsec);
+
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_H), 0);
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_L),
+ (ts->tv_sec));
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_NS), ts->tv_nsec);
+}
+
+static void xlnx_rtc_offset_read(struct xlnx_ptp_timer *timer,
+ struct timespec64 *ts)
+{
+ ts->tv_sec = in_be32(timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_L);
+ ts->tv_nsec = in_be32(timer->baseaddr + XTIMER1588_RTC_OFFSET_NS);
+}
+
+/* PTP clock operations
+ */
+static int xlnx_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+
+ int neg_adj = 0;
+ u64 freq;
+ u32 diff, incval;
+
+	/* This number should be replaced by a call to get the frequency
+	 * from the device tree. Currently assumes a 125 MHz reference clock.
+	 */
+	incval = 0x800000;
+	/* for a 156.25 MHz ref clock the value is incval = 0x800000 */
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
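+	/* scale the nominal increment: diff = incval * |ppb| / 1e9 */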
+ freq = incval;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+	pr_debug("%s: adj: %u ppb: %d\n", __func__, diff, ppb);
+
+ incval = neg_adj ? (incval - diff) : (incval + diff);
+ out_be32((timer->baseaddr + XTIMER1588_RTC_INCREMENT), incval);
+ return 0;
+}
+
+static int xlnx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ struct timespec64 now, then = ns_to_timespec64(delta);
+
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ xlnx_rtc_offset_read(timer, &now);
+
+ now = timespec64_add(now, then);
+
+	xlnx_rtc_offset_write(timer, &now);
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+
+ return 0;
+}
+
+static int xlnx_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ xlnx_tod_read(timer, ts);
+
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+ return 0;
+}
+
+/**
+ * xlnx_ptp_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec64 containing the new time for the cycle counter
+ *
+ * Return: 0 in all cases.
+ *
+ * The seconds register is written first, then the nanoseconds register.
+ * The hardware loads the entire new value when the nanoseconds register
+ * is written.
+ */
+static int xlnx_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ struct timespec64 delta, tod;
+ struct timespec64 offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ /* First zero the offset */
+ offset.tv_sec = 0;
+ offset.tv_nsec = 0;
+ xlnx_rtc_offset_write(timer, &offset);
+
+ /* Get the current timer value */
+ xlnx_tod_read(timer, &tod);
+
+ /* Subtract the current reported time from our desired time */
+ delta = timespec64_sub(*ts, tod);
+
+ /* Don't write a negative offset */
+ if (delta.tv_sec <= 0) {
+ delta.tv_sec = 0;
+ if (delta.tv_nsec < 0)
+ delta.tv_nsec = 0;
+ }
+
+ xlnx_rtc_offset_write(timer, &delta);
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+ return 0;
+}
+
+static int xlnx_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ timer->pps_enable = 1;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info xlnx_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "Xilinx Timer",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 1,
+ .adjfreq = xlnx_ptp_adjfreq,
+ .adjtime = xlnx_ptp_adjtime,
+ .gettime64 = xlnx_ptp_gettime,
+ .settime64 = xlnx_ptp_settime,
+ .enable = xlnx_ptp_enable,
+};
+
+/* module operations */
+
+/**
+ * xlnx_ptp_timer_isr - Interrupt Service Routine
+ * @irq: IRQ number
+ * @priv: pointer to the timer structure
+ *
+ * Return: IRQ_HANDLED for all cases
+ *
+ * Handles the timer interrupt. The timer interrupt fires 128 times per
+ * second. When the count reaches 128, emit a PTP_CLOCK_PPS event.
+ */
+static irqreturn_t xlnx_ptp_timer_isr(int irq, void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_PPS;
+ ++timer->countpulse;
+ if (timer->countpulse >= PULSESIN1PPS) {
+ timer->countpulse = 0;
+ if ((timer->ptp_clock) && (timer->pps_enable))
+ ptp_clock_event(timer->ptp_clock, &event);
+ }
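+	/* acknowledge the timer interrupt */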
+ out_be32((timer->baseaddr + XTIMER1588_INTERRUPT),
+ (1 << XTIMER1588_INT_SHIFT));
+
+ return IRQ_HANDLED;
+}
+
+int axienet_ptp_timer_remove(void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+
+ free_irq(timer->irq, (void *)timer);
+
+ axienet_phc_index = -1;
+ if (timer->ptp_clock) {
+ ptp_clock_unregister(timer->ptp_clock);
+ timer->ptp_clock = NULL;
+ }
+ kfree(timer);
+ return 0;
+}
+
+int axienet_get_phc_index(void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+
+ if (timer->ptp_clock)
+ return ptp_clock_index(timer->ptp_clock);
+ else
+ return -1;
+}
+
+void *axienet_ptp_timer_probe(void __iomem *base, struct platform_device *pdev)
+{
+ struct xlnx_ptp_timer *timer;
+ struct timespec64 ts;
+ int err = 0;
+
+ timer = kzalloc(sizeof(*timer), GFP_KERNEL);
+ if (!timer)
+ return NULL;
+
+ timer->baseaddr = base;
+
+ timer->irq = platform_get_irq_byname(pdev, "interrupt_ptp_timer");
+
+ if (timer->irq < 0) {
+ timer->irq = platform_get_irq_byname(pdev, "rtc_irq");
+ if (timer->irq > 0) {
+			pr_err("ptp timer interrupt name 'rtc_irq' is deprecated\n");
+ } else {
+ pr_err("ptp timer interrupt not found\n");
+ kfree(timer);
+ return NULL;
+ }
+ }
+ spin_lock_init(&timer->reg_lock);
+
+ timer->ptp_clock_info = xlnx_ptp_clock_info;
+
+ timer->ptp_clock = ptp_clock_register(&timer->ptp_clock_info,
+ &pdev->dev);
+
+ if (IS_ERR(timer->ptp_clock)) {
+ err = PTR_ERR(timer->ptp_clock);
+ pr_debug("Failed to register ptp clock\n");
+ goto out;
+ }
+
+ axienet_phc_index = ptp_clock_index(timer->ptp_clock);
+
+ ts = ktime_to_timespec64(ktime_get_real());
+
+ xlnx_ptp_settime(&timer->ptp_clock_info, &ts);
+
+ /* Enable interrupts */
+ err = request_irq(timer->irq,
+ xlnx_ptp_timer_isr,
+ 0,
+ "ptp_rtc",
+ (void *)timer);
+ if (err)
+ goto err_irq;
+
+ return timer;
+
+err_irq:
+ ptp_clock_unregister(timer->ptp_clock);
+out:
+	timer->ptp_clock = NULL;
+	kfree(timer);
+	return NULL;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c
new file mode 100644
index 000000000000..831b4b7b5085
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c
@@ -0,0 +1,369 @@
+/*
+ * Xilinx FPGA Xilinx TSN PTP transfer protocol module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_ptp.h"
+#include "xilinx_tsn_timer.h"
+#include <linux/ptp_classify.h>
+
+#define PTP_ONE_SECOND 1000000000 /**< Value in ns */
+
+#define msg_type_string(type) \
+ ((type) == PTP_TYPE_SYNC) ? "SYNC" : \
+ ((type) == PTP_TYPE_FOLLOW_UP) ? "FOLLOW_UP" : \
+ ((type) == PTP_TYPE_PDELAYREQ) ? "PDELAY_REQ" : \
+ ((type) == PTP_TYPE_PDELAYRESP) ? "PDELAY_RESP" : \
+ ((type) == PTP_TYPE_PDELAYRESP_FOLLOW_UP) ? "PDELAY_RESP_FOLLOW_UP" : \
+ ((type) == PTP_TYPE_ANNOUNCE) ? "ANNOUNCE" : \
+ "UNKNOWN"
+
+/**
+ * memcpy_fromio_32 - copy ptp buffer from HW
+ * @lp: Pointer to axienet local structure
+ * @offset: Offset in the PTP buffer
+ * @data: Destination buffer
+ * @len: Len to copy
+ *
+ * This function copies the data from the PTP buffer to the destination buffer
+ */
+static void memcpy_fromio_32(struct axienet_local *lp,
+ unsigned long offset, u8 *data, size_t len)
+{
+ while (len >= 4) {
+ *(u32 *)data = axienet_ior(lp, offset);
+ len -= 4;
+ offset += 4;
+ data += 4;
+ }
+
+ if (len > 0) {
+ u32 leftover = axienet_ior(lp, offset);
+ u8 *src = (u8 *)&leftover;
+
+ while (len) {
+ *data++ = *src++;
+ len--;
+ }
+ }
+}
+
+/**
+ * memcpy_toio_32 - copy ptp buffer to HW
+ * @lp: Pointer to axienet local structure
+ * @offset: Offset in the PTP buffer
+ * @data: Source data
+ * @len: Len to copy
+ *
+ * This function copies the source data to the destination PTP buffer
+ */
+static void memcpy_toio_32(struct axienet_local *lp,
+ unsigned long offset, u8 *data, size_t len)
+{
+ while (len >= 4) {
+ axienet_iow(lp, offset, *(u32 *)data);
+ len -= 4;
+ offset += 4;
+ data += 4;
+ }
+
+ if (len > 0) {
+ u32 leftover = 0;
+ u8 *dest = (u8 *)&leftover;
+
+ while (len) {
+ *dest++ = *data++;
+ len--;
+ }
+ axienet_iow(lp, offset, leftover);
+ }
+}
+
+static int is_sync(struct sk_buff *skb)
+{
+ u8 *msg_type;
+
+ msg_type = (u8 *)skb->data + ETH_HLEN;
+
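+	/* the PTP messageType is the low nibble of the first byte of the
+	 * PTP header, which here directly follows the Ethernet header
+	 */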
+ return (*msg_type & 0xf) == PTP_TYPE_SYNC;
+}
+
+/**
+ * axienet_ptp_xmit - xmit skb using PTP HW
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is called to transmit a PTP skb. The function uses
+ * the free PTP TX buffer entry and sends the frame
+ */
+int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u8 msg_type;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ u8 tx_frame_waiting;
+ u8 free_index;
+ u32 cmd1_field = 0;
+ u32 cmd2_field = 0;
+
+ msg_type = *(u8 *)(skb->data + ETH_HLEN);
+
+ pr_debug(" -->XMIT: protocol: %x message: %s frame_len: %d\n",
+ skb->protocol,
+ msg_type_string(msg_type & 0xf), skb->len);
+
+ tx_frame_waiting = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
+ PTP_TX_FRAME_WAITING_MASK) >>
+ PTP_TX_FRAME_WAITING_SHIFT;
+
+ /* we reached last frame */
+ if (tx_frame_waiting & (1 << 7)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ pr_debug("tx_frame_waiting: %d\n", tx_frame_waiting);
+ return NETDEV_TX_BUSY;
+ }
+
+	/* go to the next available slot: waiting frames occupy the low bits
+	 * of the bitmap contiguously, so fls() yields the first free index
+	 */
+	free_index = fls(tx_frame_waiting);
+
+ /* write the len */
+ if (lp->ptp_ts_type == HWTSTAMP_TX_ONESTEP_SYNC &&
+ is_sync(skb)) {
+ /* enable 1STEP SYNC */
+		/* enable 1-step SYNC: the core inserts the egress time at
+		 * the Time Of Day field offset in the frame
+		 */
+ cmd2_field |= PTP_TOD_FIELD_OFFSET;
+ }
+
+ cmd1_field |= skb->len;
+
+ axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index), cmd1_field);
+ axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index) +
+ PTP_TX_BUFFER_CMD2_FIELD, cmd2_field);
+ memcpy_toio_32(lp,
+ (PTP_TX_BUFFER_OFFSET(free_index) +
+ PTP_TX_CMD_FIELD_LEN),
+ skb->data, skb->len);
+
+ /* send the frame */
+ axienet_iow(lp, PTP_TX_CONTROL_OFFSET, (1 << free_index));
+
+ if (lp->ptp_ts_type != HWTSTAMP_TX_ONESTEP_SYNC ||
+ (!is_sync(skb))) {
+ spin_lock_irqsave(&lp->ptp_tx_lock, flags);
+ skb->cb[0] = free_index;
+ skb_queue_tail(&lp->ptp_txq, skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
+ }
+ return NETDEV_TX_OK;
+}
+
+/**
+ * axienet_set_timestamp - timestamp skb with HW timestamp
+ * @lp: Pointer to axienet local structure
+ * @hwtstamps: Pointer to skb timestamp structure
+ * @offset: offset of the timestamp in the PTP buffer
+ *
+ * Return: None.
+ *
+ */
+static void axienet_set_timestamp(struct axienet_local *lp,
+ struct skb_shared_hwtstamps *hwtstamps,
+ unsigned int offset)
+{
+ u32 captured_ns;
+ u32 captured_sec;
+
+ captured_ns = axienet_ior(lp, offset + 4);
+ captured_sec = axienet_ior(lp, offset);
+
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(captured_sec,
+ captured_ns);
+}
+
+/**
+ * axienet_ptp_recv - receive ptp buffer in skb from HW
+ * @ndev: Pointer to net_device structure.
+ *
+ * This function is called from the ptp rx isr. It allocates skb, and
+ * copies the ptp rx buffer data to it and calls netif_rx for further
+ * processing.
+ *
+ */
+static void axienet_ptp_recv(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned long ptp_frame_base_addr = 0;
+ struct sk_buff *skb;
+ u16 msg_len;
+ u8 msg_type;
+ u32 bytes = 0;
+ u32 packets = 0;
+
+ pr_debug("%s:\n ", __func__);
+
+ while (((lp->ptp_rx_hw_pointer & 0xf) !=
+ (lp->ptp_rx_sw_pointer & 0xf))) {
+		skb = netdev_alloc_skb(ndev, PTP_RX_FRAME_SIZE);
+		if (unlikely(!skb))
+			break;	/* retry on the next interrupt */
+
+ lp->ptp_rx_sw_pointer += 1;
+
+ ptp_frame_base_addr = PTP_RX_BASE_OFFSET +
+ ((lp->ptp_rx_sw_pointer & 0xf) *
+ PTP_RX_HWBUF_SIZE);
+
+ memset(skb->data, 0x0, PTP_RX_FRAME_SIZE);
+
+ memcpy_fromio_32(lp, ptp_frame_base_addr, skb->data,
+ PTP_RX_FRAME_SIZE);
+
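+		/* messageLength lives at offset 2 of the PTP header and is
+		 * big-endian on the wire
+		 */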
+ msg_type = *(u8 *)(skb->data + ETH_HLEN) & 0xf;
+ msg_len = *(u16 *)(skb->data + ETH_HLEN + 2);
+
+ skb_put(skb, ntohs(msg_len) + ETH_HLEN);
+
+ bytes += skb->len;
+ packets++;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ pr_debug(" -->RECV: protocol: %x message: %s frame_len: %d\n",
+ skb->protocol, msg_type_string(msg_type & 0xf),
+ skb->len);
+ /* timestamp only event messages */
+ if (!(msg_type & PTP_MSG_TYPE_MASK)) {
+ axienet_set_timestamp(lp, skb_hwtstamps(skb),
+ (ptp_frame_base_addr +
+ PTP_HW_TSTAMP_OFFSET));
+ }
+
+ netif_rx(skb);
+ }
+ ndev->stats.rx_packets += packets;
+ ndev->stats.rx_bytes += bytes;
+}
+
+/**
+ * axienet_ptp_rx_irq - PTP RX ISR handler
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED for all cases.
+ */
+irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ pr_debug("%s: received\n ", __func__);
+ lp->ptp_rx_hw_pointer = (axienet_ior(lp, PTP_RX_CONTROL_OFFSET)
+ & PTP_RX_PACKET_FIELD_MASK) >> 8;
+
+ axienet_ptp_recv(ndev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_tx_tstamp - timestamp skb on the transmit path
+ * @work: Pointer to work_struct structure
+ *
+ * This adds the TX timestamp to the skb
+ */
+void axienet_tx_tstamp(struct work_struct *work)
+{
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ tx_tstamp_work);
+ struct net_device *ndev = lp->ndev;
+ struct skb_shared_hwtstamps hwtstamps;
+ struct sk_buff *skb;
+ unsigned long ts_reg_offset;
+ unsigned long flags;
+ u8 tx_packet;
+ u8 index;
+ u32 bytes = 0;
+ u32 packets = 0;
+
+ memset(&hwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
+ spin_lock_irqsave(&lp->ptp_tx_lock, flags);
+
+ tx_packet = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
+ PTP_TX_PACKET_FIELD_MASK) >>
+ PTP_TX_PACKET_FIELD_SHIFT;
+
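+	/* tx_packet holds the index of the most recently transmitted PTP
+	 * buffer; any queued skb with a higher index is still pending
+	 */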
+ while ((skb = __skb_dequeue(&lp->ptp_txq)) != NULL) {
+ index = skb->cb[0];
+
+		/* dequeued packet not yet transmitted? */
+ if (index > tx_packet) {
+ /* enqueue it back and break */
+ skb_queue_tail(&lp->ptp_txq, skb);
+ break;
+ }
+ /* time stamp reg offset */
+ ts_reg_offset = PTP_TX_BUFFER_OFFSET(index) +
+ PTP_HW_TSTAMP_OFFSET;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ axienet_set_timestamp(lp, &hwtstamps, ts_reg_offset);
+ skb_tstamp_tx(skb, &hwtstamps);
+ }
+
+ bytes += skb->len;
+ packets++;
+ dev_kfree_skb_any(skb);
+ }
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += bytes;
+
+ spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
+}
+
+/**
+ * axienet_ptp_tx_irq - PTP TX irq handler
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ */
+irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ pr_debug("%s: got tx interrupt\n", __func__);
+
+ /* read ctrl register to clear the interrupt */
+ axienet_ior(lp, PTP_TX_CONTROL_OFFSET);
+
+ schedule_work(&lp->tx_tstamp_work);
+
+ netif_wake_queue(ndev);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c b/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c
new file mode 100644
index 000000000000..20efa6c4d365
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c
@@ -0,0 +1,151 @@
+/*
+ * Xilinx FPGA Xilinx TSN QCI Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_tsn_switch.h"
+
+#define SMC_MODE_SHIFT 28
+#define SMC_CBR_MASK 0x00FFFFFF
+#define SMC_EBR_MASK 0x00FFFFFF
+#define IN_PORTID_MASK 0x3
+#define IN_PORT_SHIFT 14
+#define MAX_FR_SIZE_MASK 0x00000FFF
+
+#define GATE_ID_SHIFT 24
+#define METER_ID_SHIFT 8
+#define EN_METER_SHIFT 6
+#define ALLOW_STREAM_SHIFT			5
+#define EN_PSFP_SHIFT 4
+#define WR_OP_TYPE_MASK 0x3
+#define WR_OP_TYPE_SHIFT 2
+#define OP_TYPE_SHIFT 1
+#define PSFP_EN_CONTROL_MASK 0x1
+
+/**
+ * psfp_control - Configure the control register for PSFP
+ * @data: Value to be programmed
+ */
+void psfp_control(struct psfp_config data)
+{
+ u32 mask;
+ u32 timeout = 20000;
+
+ mask = data.gate_id << GATE_ID_SHIFT;
+ mask |= data.meter_id << METER_ID_SHIFT;
+ mask |= data.en_meter << EN_METER_SHIFT;
+	mask |= data.allow_stream << ALLOW_STREAM_SHIFT;
+ mask |= data.en_psfp << EN_PSFP_SHIFT;
+ mask |= (data.wr_op_type & WR_OP_TYPE_MASK) << WR_OP_TYPE_SHIFT;
+ mask |= data.op_type << OP_TYPE_SHIFT;
+ mask |= PSFP_EN_CONTROL_MASK;
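+	/* bit 0 kicks off the operation; the core clears it on completion */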
+
+ axienet_iow(&lp, PSFP_CONTROL_OFFSET, mask);
+
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, PSFP_CONTROL_OFFSET) &
+ PSFP_EN_CONTROL_MASK) && timeout)
+ timeout--;
+
+	if (!timeout)
+		pr_warn("PSFP control write did not complete\n");
+}
+
+/**
+ * get_stream_filter_config - Get Stream Filter Configuration
+ * @data: Value returned
+ */
+void get_stream_filter_config(struct stream_filter *data)
+{
+ u32 reg_val;
+
+ reg_val = axienet_ior(&lp, STREAM_FILTER_CONFIG_OFFSET);
+
+ data->max_fr_size = reg_val & MAX_FR_SIZE_MASK;
+ data->in_pid = (reg_val >> IN_PORT_SHIFT) & IN_PORTID_MASK;
+}
+
+/**
+ * config_stream_filter - Configure Stream Filter Configuration
+ * @data: Value to be programmed
+ */
+void config_stream_filter(struct stream_filter data)
+{
+ u32 mask;
+
+ mask = ((data.in_pid & IN_PORTID_MASK) << IN_PORT_SHIFT) |
+ (data.max_fr_size & MAX_FR_SIZE_MASK);
+ axienet_iow(&lp, STREAM_FILTER_CONFIG_OFFSET, mask);
+}
+
+/**
+ * get_meter_reg - Read Stream Meter Configuration registers value
+ * @data: Value returned
+ */
+void get_meter_reg(struct meter_config *data)
+{
+ u32 conf_r4;
+
+ data->cir = axienet_ior(&lp, STREAM_METER_CIR_OFFSET);
+ data->eir = axienet_ior(&lp, STREAM_METER_EIR_OFFSET);
+ data->cbr = axienet_ior(&lp, STREAM_METER_CBR_OFFSET) & SMC_CBR_MASK;
+ conf_r4 = axienet_ior(&lp, STREAM_METER_EBR_OFFSET);
+
+ data->ebr = conf_r4 & SMC_EBR_MASK;
+ data->mode = (conf_r4 & 0xF0000000) >> SMC_MODE_SHIFT;
+}
+
+/**
+ * program_meter_reg - configure Stream Meter Configuration registers
+ * @data: Value to be programmed
+ */
+void program_meter_reg(struct meter_config data)
+{
+ u32 conf_r4;
+
+ axienet_iow(&lp, STREAM_METER_CIR_OFFSET, data.cir);
+ axienet_iow(&lp, STREAM_METER_EIR_OFFSET, data.eir);
+ axienet_iow(&lp, STREAM_METER_CBR_OFFSET, data.cbr & SMC_CBR_MASK);
+
+ conf_r4 = (data.ebr & SMC_EBR_MASK) | (data.mode << SMC_MODE_SHIFT);
+ axienet_iow(&lp, STREAM_METER_EBR_OFFSET, conf_r4);
+}
+
+/**
+ * get_psfp_static_counter - get the PSFP static counter values
+ * @data: return value, containing the counter values
+ */
+void get_psfp_static_counter(struct psfp_static_counter *data)
+{
+ int offset = (data->num) * 8;
+
+ data->psfp_fr_count.lsb = axienet_ior(&lp, TOTAL_PSFP_FRAMES_OFFSET +
+ offset);
+ data->psfp_fr_count.msb = axienet_ior(&lp, TOTAL_PSFP_FRAMES_OFFSET +
+ offset + 0x4);
+
+ data->err_filter_ins_port.lsb = axienet_ior(&lp,
+ FLTR_INGS_PORT_ERR_OFFSET + offset);
+ data->err_filter_ins_port.msb = axienet_ior(&lp,
+ FLTR_INGS_PORT_ERR_OFFSET + offset + 0x4);
+
+ data->err_filtr_sdu.lsb = axienet_ior(&lp, FLTR_STDU_ERR_OFFSET +
+ offset);
+ data->err_filtr_sdu.msb = axienet_ior(&lp, FLTR_STDU_ERR_OFFSET +
+ offset + 0x4);
+
+ data->err_meter.lsb = axienet_ior(&lp, METER_ERR_OFFSET + offset);
+ data->err_meter.msb = axienet_ior(&lp, METER_ERR_OFFSET + offset + 0x4);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c
new file mode 100644
index 000000000000..e7a054b78a6e
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c
@@ -0,0 +1,232 @@
+/*
+ * Xilinx FPGA Xilinx TSN QBV scheduler module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_shaper.h"
+
+static inline int axienet_map_gs_to_hw(struct axienet_local *lp, u32 gs)
+{
+ u8 be_queue = 0;
+ u8 re_queue = 1;
+ u8 st_queue = 2;
+ unsigned int acl_bit_map = 0;
+
+ if (lp->num_tc == 2)
+ st_queue = 1;
+
+ if (gs & GS_BE_OPEN)
+ acl_bit_map |= (1 << be_queue);
+ if (gs & GS_ST_OPEN)
+ acl_bit_map |= (1 << st_queue);
+ if (lp->num_tc == 3 && (gs & GS_RE_OPEN))
+ acl_bit_map |= (1 << re_queue);
+
+ return acl_bit_map;
+}
+
+static int __axienet_set_schedule(struct net_device *ndev, struct qbv_info *qbv)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u16 i;
+ unsigned int acl_bit_map = 0;
+ u32 u_config_change = 0;
+ u8 port = qbv->port;
+
+ if (qbv->cycle_time == 0) {
+ /* clear the gate enable bit */
+ u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
+ /* open all the gates */
+ u_config_change |= CC_ADMIN_GATE_STATE_SHIFT;
+
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+
+ return 0;
+ }
+
+ if (axienet_ior(lp, PORT_STATUS(port)) & 1) {
+ if (qbv->force) {
+ u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+ } else {
+ return -EALREADY;
+ }
+ }
+ /* write admin time */
+ axienet_iow(lp, ADMIN_CYCLE_TIME_DENOMINATOR(port),
+ qbv->cycle_time & CYCLE_TIME_DENOMINATOR_MASK);
+
+ axienet_iow(lp, ADMIN_BASE_TIME_NS(port), qbv->ptp_time_ns);
+
+ axienet_iow(lp, ADMIN_BASE_TIME_SEC(port),
+ qbv->ptp_time_sec & 0xFFFFFFFF);
+ axienet_iow(lp, ADMIN_BASE_TIME_SECS(port),
+ (qbv->ptp_time_sec >> 32) & BASE_TIME_SECS_MASK);
+
+ u_config_change = axienet_ior(lp, CONFIG_CHANGE(port));
+
+ u_config_change &= ~(CC_ADMIN_CTRL_LIST_LENGTH_MASK <<
+ CC_ADMIN_CTRL_LIST_LENGTH_SHIFT);
+ u_config_change |= (qbv->list_length & CC_ADMIN_CTRL_LIST_LENGTH_MASK)
+ << CC_ADMIN_CTRL_LIST_LENGTH_SHIFT;
+
+ /* program each list */
+ for (i = 0; i < qbv->list_length; i++) {
+ acl_bit_map = axienet_map_gs_to_hw(lp, qbv->acl_gate_state[i]);
+ axienet_iow(lp, ADMIN_CTRL_LIST(port, i),
+ (acl_bit_map & (ACL_GATE_STATE_MASK)) <<
+ ACL_GATE_STATE_SHIFT);
+
+ /* set the time for each entry */
+ axienet_iow(lp, ADMIN_CTRL_LIST_TIME(port, i),
+ qbv->acl_gate_time[i] & CTRL_LIST_TIME_INTERVAL_MASK);
+ }
+
+ /* clear interrupt status */
+ axienet_iow(lp, INT_STATUS(port), 0);
+
+ /* kick in new config change */
+ u_config_change |= CC_ADMIN_CONFIG_CHANGE_BIT;
+
+ /* enable gate */
+ u_config_change |= CC_ADMIN_GATE_ENABLE_BIT;
+
+ /* start */
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+
+ return 0;
+}
+
+int axienet_set_schedule(struct net_device *ndev, void __user *useraddr)
+{
+ struct qbv_info *config;
+ int ret;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ if (copy_from_user(config, useraddr, sizeof(struct qbv_info))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ pr_debug("setting new schedule\n");
+
+ ret = __axienet_set_schedule(ndev, config);
+out:
+ kfree(config);
+ return ret;
+}
+
+static int __axienet_get_schedule(struct net_device *ndev, struct qbv_info *qbv)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u16 i = 0;
+ u32 u_value = 0;
+ u8 port = qbv->port;
+
+ if (!(axienet_ior(lp, CONFIG_CHANGE(port)) &
+ CC_ADMIN_GATE_ENABLE_BIT)) {
+ qbv->cycle_time = 0;
+ return 0;
+ }
+
+ u_value = axienet_ior(lp, GATE_STATE(port));
+ qbv->list_length = (u_value >> CC_ADMIN_CTRL_LIST_LENGTH_SHIFT) &
+ CC_ADMIN_CTRL_LIST_LENGTH_MASK;
+
+ u_value = axienet_ior(lp, OPER_CYCLE_TIME_DENOMINATOR(port));
+ qbv->cycle_time = u_value & CYCLE_TIME_DENOMINATOR_MASK;
+
+ u_value = axienet_ior(lp, OPER_BASE_TIME_NS(port));
+ qbv->ptp_time_ns = u_value & OPER_BASE_TIME_NS_MASK;
+
+ qbv->ptp_time_sec = axienet_ior(lp, OPER_BASE_TIME_SEC(port));
+ u_value = axienet_ior(lp, OPER_BASE_TIME_SECS(port));
+ qbv->ptp_time_sec |= (u64)(u_value & BASE_TIME_SECS_MASK) << 32;
+
+ for (i = 0; i < qbv->list_length; i++) {
+ u_value = axienet_ior(lp, OPER_CTRL_LIST(port, i));
+ qbv->acl_gate_state[i] = (u_value >> ACL_GATE_STATE_SHIFT) &
+ ACL_GATE_STATE_MASK;
+		/* In a 2-queue system the actual HW ST gate state value is
+		 * 2, but to the user the ST gate state value is always 4.
+		 */
+ if (lp->num_tc == 2 && qbv->acl_gate_state[i] == 2)
+ qbv->acl_gate_state[i] = 4;
+
+ u_value = axienet_ior(lp, OPER_CTRL_LIST_TIME(port, i));
+ qbv->acl_gate_time[i] = u_value & CTRL_LIST_TIME_INTERVAL_MASK;
+ }
+ return 0;
+}
+
+int axienet_get_schedule(struct net_device *ndev, void __user *useraddr)
+{
+ struct qbv_info *qbv;
+ int ret = 0;
+
+ qbv = kmalloc(sizeof(*qbv), GFP_KERNEL);
+ if (!qbv)
+ return -ENOMEM;
+
+ if (copy_from_user(qbv, useraddr, sizeof(struct qbv_info))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ __axienet_get_schedule(ndev, qbv);
+
+ if (copy_to_user(useraddr, qbv, sizeof(struct qbv_info)))
+ ret = -EFAULT;
+out:
+ kfree(qbv);
+ return ret;
+}
+
+static irqreturn_t axienet_qbv_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ u8 port = 0; /* TODO */
+
+ /* clear status */
+ axienet_iow(lp, INT_CLEAR(port), 0);
+
+ return IRQ_HANDLED;
+}
+
+int axienet_qbv_init(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int rc;
+
+	rc = request_irq(lp->qbv_irq, axienet_qbv_irq, 0, ndev->name, ndev);
+
+	return rc;
+}
+
+void axienet_qbv_remove(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ free_irq(lp->qbv_irq, ndev);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h
new file mode 100644
index 000000000000..ac2e54d0e134
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h
@@ -0,0 +1,151 @@
+/*
+ * Xilinx TSN QBV scheduler header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_SHAPER_H
+#define XILINX_TSN_SHAPER_H
+
+/* 0x0 CONFIG_CHANGE
+ * 0x8 GATE_STATE
+ * 0x10 ADMIN_CTRL_LIST_LENGTH
+ * 0x18 ADMIN_CYCLE_TIME_DENOMINATOR
+ * 0x20 ADMIN_BASE_TIME_NS
+ * 0x24 ADMIN_BASE_TIME_SEC
+ * 0x28 ADMIN_BASE_TIME_SECS
+ * 0x30 INT_STAT
+ * 0x34 INT_EN
+ * 0x38 INT_CLR
+ * 0x3c STATUS
+ * 0x40 CONFIG_CHANGE_TIME_NS
+ * 0x44 CONFIG_CHANGE_TIME_SEC
+ * 0x48 CONFIG_CHANGE_TIME_SECS
+ * 0x50 OPER_CTRL_LIST_LENGTH
+ * 0x58 OPER_CYCLE_TIME_DENOMINATOR
+ * 0x60 OPER_BASE_TIME_NS
+ * 0x64 OPER_BASE_TIME_SEC
+ * 0x68 OPER_BASE_TIME_SECS
+ * 0x6c BE_XMIT_OVRRUN_CNT
+ * 0x74 RES_XMIT_OVRRUN_CNT
+ * 0x7c ST_XMIT_OVRRUN_CNT
+ */
+
+enum hw_port {
+ PORT_EP = 0,
+ PORT_TEMAC_1,
+ PORT_TEMAC_2,
+};
+
+			      /* EP */	/* TEMAC1 */	/* TEMAC2 */
+static u32 qbv_reg_map[3] = { 0x0, 0x14000, 0x14000 };
+
+/* 0x14000 0x14FFC Time Schedule Registers (Control & Status)
+ * 0x15000 0x15FFF Time Schedule Control List Entries
+ */
+
+#define TIME_SCHED_BASE(port) qbv_reg_map[(port)]
+
+#define CTRL_LIST_BASE(port) (TIME_SCHED_BASE(port) + 0x1000)
+
+/* control list entries
+ * admin control list 0 : 31
+ * "Time interval between two gate entries" must be greater than
+ * "time required to transmit biggest supported frame" on that queue when
+ * the gate for the queue is going from open to close state.
+ */
+#define ADMIN_CTRL_LIST(port, n) (CTRL_LIST_BASE(port) + ((n) * 8))
+#define ACL_GATE_STATE_SHIFT 8
+#define ACL_GATE_STATE_MASK 0x7
+#define ADMIN_CTRL_LIST_TIME(port, n) (ADMIN_CTRL_LIST((port), n) + 4)
+
+#define OPER_CTRL_LIST(port, n) (CTRL_LIST_BASE(port) + 0x800 + ((n) * 8))
+#define OPER_CTRL_LIST_TIME(port, n) (OPER_CTRL_LIST(port, n) + 4)
+#define CTRL_LIST_TIME_INTERVAL_MASK 0xFFFFF
+
+#define CONFIG_CHANGE(port) (TIME_SCHED_BASE(port) + 0x0)
+#define CC_ADMIN_GATE_STATE_SHIFT 0x7
+#define CC_ADMIN_GATE_STATE_MASK (7)
+#define CC_ADMIN_CTRL_LIST_LENGTH_SHIFT (8)
+#define CC_ADMIN_CTRL_LIST_LENGTH_MASK (0x1FF)
+/* This request bit is set when all the related Admin* fields are populated.
+ * The bit is set by S/W and cleared by the core when the core starts with
+ * the new schedule. Once set, it can only be cleared by the core or by a
+ * hard/soft reset.
+ */
+#define CC_ADMIN_CONFIG_CHANGE_BIT BIT(30)
+#define CC_ADMIN_GATE_ENABLE_BIT BIT(31)
+
+#define GATE_STATE(port) (TIME_SCHED_BASE(port) + 0x8)
+#define GS_OPER_GATE_STATE_SHIFT (0)
+#define GS_OPER_GATE_STATE_MASK (0x7)
+#define GS_OPER_CTRL_LIST_LENGTH_SHIFT (8)
+#define GS_OPER_CTRL_LIST_LENGTH_MASK (0x3F)
+#define GS_SUP_MAX_LIST_LENGTH_SHIFT (16)
+#define GS_SUP_MAX_LIST_LENGTH_MASK (0x3F)
+#define GS_TICK_GRANULARITY_SHIFT (24)
+#define GS_TICK_GRANULARITY_MASK (0x3F)
+
+#define ADMIN_CYCLE_TIME_DENOMINATOR(port) (TIME_SCHED_BASE(port) + 0x18)
+#define ADMIN_BASE_TIME_NS(port) (TIME_SCHED_BASE(port) + 0x20)
+#define ADMIN_BASE_TIME_SEC(port) (TIME_SCHED_BASE(port) + 0x24)
+#define ADMIN_BASE_TIME_SECS(port) (TIME_SCHED_BASE(port) + 0x28)
+
+#define INT_STATUS(port) (TIME_SCHED_BASE(port) + 0x30)
+#define INT_ENABLE(port) (TIME_SCHED_BASE(port) + 0x34)
+#define INT_CLEAR(port) (TIME_SCHED_BASE(port) + 0x38)
+#define PORT_STATUS(port) (TIME_SCHED_BASE(port) + 0x3c)
+
+/* Config Change time is valid after Config Pending bit is set. */
+#define CONFIG_CHANGE_TIME_NS(port) (TIME_SCHED_BASE((port)) + 0x40)
+#define CONFIG_CHANGE_TIME_SEC(port) (TIME_SCHED_BASE((port)) + 0x44)
+#define CONFIG_CHANGE_TIME_SECS(port) (TIME_SCHED_BASE((port)) + 0x48)
+
+#define OPER_CONTROL_LIST_LENGTH(port) (TIME_SCHED_BASE(port) + 0x50)
+#define OPER_CYCLE_TIME_DENOMINATOR(port) (TIME_SCHED_BASE(port) + 0x58)
+#define CYCLE_TIME_DENOMINATOR_MASK (0x3FFFFFFF)
+
+#define OPER_BASE_TIME_NS(port) (TIME_SCHED_BASE(port) + 0x60)
+#define OPER_BASE_TIME_NS_MASK (0x3FFFFFFF)
+#define OPER_BASE_TIME_SEC(port) (TIME_SCHED_BASE(port) + 0x64)
+#define OPER_BASE_TIME_SECS(port) (TIME_SCHED_BASE(port) + 0x68)
+#define BASE_TIME_SECS_MASK (0xFFFF)
+
+#define BE_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x6c)
+#define RES_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x74)
+#define ST_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x7c)
+
+/* Internally the HW deals with queues only:
+ * in a 3-queue system the ST acl bitmap would be 1 << 2,
+ * in a 2-queue system the ST acl bitmap would be 1 << 1.
+ * This is confusing to users, so expose the following fixed gate states
+ * and map them to the HW internally.
+ */
+#define GS_BE_OPEN BIT(0)
+#define GS_RE_OPEN BIT(1)
+#define GS_ST_OPEN BIT(2)
+#define QBV_MAX_ENTRIES 256
+
+struct qbv_info {
+ u8 port;
+ u8 force;
+ u32 cycle_time;
+ u64 ptp_time_sec;
+ u32 ptp_time_ns;
+ u32 list_length;
+ u32 acl_gate_state[QBV_MAX_ENTRIES];
+ u32 acl_gate_time[QBV_MAX_ENTRIES];
+};
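+
+/* A minimal usage sketch for programming a two-entry schedule through
+ * axienet_set_schedule(); the 1 ms numbers and the TEMAC1 port choice are
+ * illustrative assumptions, not requirements of this interface:
+ *
+ *	struct qbv_info q = {
+ *		.port = PORT_TEMAC_1,
+ *		.cycle_time = 1000000,
+ *		.list_length = 2,
+ *		.acl_gate_state = { GS_ST_OPEN, GS_BE_OPEN | GS_RE_OPEN },
+ *		.acl_gate_time = { 500000, 500000 },
+ *	};
+ */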
+
+#endif /* XILINX_TSN_SHAPER_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c
new file mode 100644
index 000000000000..cccaaa76cf7a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c
@@ -0,0 +1,807 @@
+/*
+ * Xilinx FPGA Xilinx TSN Switch Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_tsn_switch.h"
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+static struct miscdevice switch_dev;
+struct axienet_local lp;
+
+#define ADD 1
+#define DELETE 0
+
+#define PMAP_EGRESS_QUEUE_MASK 0x7
+#define PMAP_EGRESS_QUEUE0_SELECT 0x0
+#define PMAP_EGRESS_QUEUE1_SELECT 0x1
+#define PMAP_EGRESS_QUEUE2_SELECT 0x2
+#define PMAP_PRIORITY0_SHIFT 0
+#define PMAP_PRIORITY1_SHIFT 4
+#define PMAP_PRIORITY2_SHIFT 8
+#define PMAP_PRIORITY3_SHIFT 12
+#define PMAP_PRIORITY4_SHIFT 16
+#define PMAP_PRIORITY5_SHIFT 20
+#define PMAP_PRIORITY6_SHIFT 24
+#define PMAP_PRIORITY7_SHIFT 28
+#define SDL_EN_CAM_IPV_SHIFT 28
+#define SDL_CAM_IPV_SHIFT 29
+
+#define SDL_CAM_WR_ENABLE BIT(0)
+#define SDL_CAM_ADD_ENTRY 0x1
+#define SDL_CAM_DELETE_ENTRY 0x3
+#define SDL_CAM_VLAN_SHIFT 16
+#define SDL_CAM_VLAN_MASK 0xFFF
+#define SDL_CAM_IPV_MASK 0x7
+#define SDL_CAM_PORT_LIST_SHIFT 8
+#define SDL_GATEID_SHIFT 16
+#define SDL_CAM_FWD_TO_EP BIT(0)
+#define SDL_CAM_FWD_TO_PORT_1 BIT(1)
+#define SDL_CAM_FWD_TO_PORT_2 BIT(2)
+#define SDL_CAM_EP_ACTION_LIST_SHIFT 0
+#define SDL_CAM_MAC_ACTION_LIST_SHIFT 4
+#define SDL_CAM_DEST_MAC_XLATION BIT(0)
+#define SDL_CAM_VLAN_ID_XLATION BIT(1)
+#define SDL_CAM_UNTAG_FRAME BIT(2)
+
+/* Match table for of_platform binding */
+static const struct of_device_id tsnswitch_of_match[] = {
+ { .compatible = "xlnx,tsn-switch", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tsnswitch_of_match);
+
+static int switch_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int switch_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* set_frame_filter_opt - Frame Filtering Type Field Options */
+static void set_frame_filter_opt(u16 type1, u16 type2)
+{
+ int type = axienet_ior(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET);
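+	/* type1 occupies the upper and type2 the lower 16 bits */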
+
+ if (type1)
+ type = (type & 0x0000FFFF) | (type1 << 16);
+ if (type2)
+ type = (type & 0xFFFF0000) | type2;
+ axienet_iow(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET, type);
+}
+
+/* MAC Port-1 Management Queueing Options */
+static void set_mac1_mngmntq(u32 config)
+{
+ axienet_iow(&lp, XAS_MAC1_MNG_Q_OPTION_OFFSET, config);
+}
+
+/* MAC Port-2 Management Queueing Options */
+static void set_mac2_mngmntq(u32 config)
+{
+ axienet_iow(&lp, XAS_MAC2_MNG_Q_OPTION_OFFSET, config);
+}
+
+/**
+ * set_switch_regs - program the switch configuration registers
+ * @data: Pointer to the configuration to be written to the switch
+ */
+static void set_switch_regs(struct switch_data *data)
+{
+ int tmp;
+ u8 mac_addr[6];
+
+ axienet_iow(&lp, XAS_CONTROL_OFFSET, data->switch_ctrl);
+ axienet_iow(&lp, XAS_PMAP_OFFSET, data->switch_prt);
+ mac_addr[0] = data->sw_mac_addr[0];
+ mac_addr[1] = data->sw_mac_addr[1];
+ mac_addr[2] = data->sw_mac_addr[2];
+ mac_addr[3] = data->sw_mac_addr[3];
+ mac_addr[4] = data->sw_mac_addr[4];
+ mac_addr[5] = data->sw_mac_addr[5];
+ axienet_iow(&lp, XAS_MAC_LSB_OFFSET,
+ (mac_addr[0] << 24) | (mac_addr[1] << 16) |
+ (mac_addr[2] << 8) | (mac_addr[3]));
+ axienet_iow(&lp, XAS_MAC_MSB_OFFSET, (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Threshold */
+ tmp = (data->thld_ep_mac[0].t1 << 16) | data->thld_ep_mac[0].t2;
+ axienet_iow(&lp, XAS_EP2MAC_ST_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_ep_mac[1].t1 << 16) | data->thld_ep_mac[1].t2;
+ axienet_iow(&lp, XAS_EP2MAC_RE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_ep_mac[2].t1 << 16) | data->thld_ep_mac[2].t2;
+ axienet_iow(&lp, XAS_EP2MAC_BE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[0].t1 << 16) | data->thld_mac_mac[0].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_ST_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[1].t1 << 16) | data->thld_mac_mac[1].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_RE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[2].t1 << 16) | data->thld_mac_mac[2].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_BE_FIFOT_OFFSET, tmp);
+
+ /* Port VLAN ID */
+ axienet_iow(&lp, XAS_EP_PORT_VLAN_OFFSET, data->ep_vlan);
+ axienet_iow(&lp, XAS_MAC_PORT_VLAN_OFFSET, data->mac_vlan);
+
+ /* max frame size */
+ axienet_iow(&lp, XAS_ST_MAX_FRAME_SIZE_OFFSET, data->max_frame_sc_que);
+ axienet_iow(&lp, XAS_RE_MAX_FRAME_SIZE_OFFSET, data->max_frame_res_que);
+ axienet_iow(&lp, XAS_BE_MAX_FRAME_SIZE_OFFSET, data->max_frame_be_que);
+}
+
+/**
+ * get_switch_regs - read back the switch configuration and status
+ * @data: Pointer which will return the switch status
+ */
+static void get_switch_regs(struct switch_data *data)
+{
+ int tmp;
+
+ data->switch_status = axienet_ior(&lp, XAS_STATUS_OFFSET);
+ data->switch_ctrl = axienet_ior(&lp, XAS_CONTROL_OFFSET);
+ data->switch_prt = axienet_ior(&lp, XAS_PMAP_OFFSET);
+ tmp = axienet_ior(&lp, XAS_MAC_LSB_OFFSET);
+ data->sw_mac_addr[0] = (tmp & 0xFF000000) >> 24;
+ data->sw_mac_addr[1] = (tmp & 0xFF0000) >> 16;
+ data->sw_mac_addr[2] = (tmp & 0xFF00) >> 8;
+ data->sw_mac_addr[3] = (tmp & 0xFF);
+ tmp = axienet_ior(&lp, XAS_MAC_MSB_OFFSET);
+ data->sw_mac_addr[4] = (tmp & 0xFF00) >> 8;
+ data->sw_mac_addr[5] = (tmp & 0xFF);
+
+ /* Threshold */
+ tmp = axienet_ior(&lp, XAS_EP2MAC_ST_FIFOT_OFFSET);
+ data->thld_ep_mac[0].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[0].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_EP2MAC_RE_FIFOT_OFFSET);
+ data->thld_ep_mac[1].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[1].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_EP2MAC_BE_FIFOT_OFFSET);
+ data->thld_ep_mac[2].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[2].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_ST_FIFOT_OFFSET);
+ data->thld_mac_mac[0].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[0].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_RE_FIFOT_OFFSET);
+ data->thld_mac_mac[1].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[1].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_BE_FIFOT_OFFSET);
+ data->thld_mac_mac[2].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[2].t2 = tmp & (0xFFFF);
+
+ /* Port VLAN ID */
+ data->ep_vlan = axienet_ior(&lp, XAS_EP_PORT_VLAN_OFFSET);
+ data->mac_vlan = axienet_ior(&lp, XAS_MAC_PORT_VLAN_OFFSET);
+
+ /* max frame size */
+ data->max_frame_sc_que = (axienet_ior(&lp,
+ XAS_ST_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+ data->max_frame_res_que = (axienet_ior(&lp,
+ XAS_RE_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+ data->max_frame_be_que = (axienet_ior(&lp,
+ XAS_BE_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+
+	/* frame filter type options */
+	tmp = axienet_ior(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET);
+	data->typefield.type1 = (tmp & 0xFFFF0000) >> 16;
+	data->typefield.type2 = tmp & 0x0000FFFF;
+
+ /* MAC Port 1 Management Q option*/
+ data->mac1_config = axienet_ior(&lp, XAS_MAC1_MNG_Q_OPTION_OFFSET);
+ /* MAC Port 2 Management Q option*/
+ data->mac2_config = axienet_ior(&lp, XAS_MAC2_MNG_Q_OPTION_OFFSET);
+
+ /* Port VLAN Membership control*/
+ data->port_vlan_mem_ctrl = axienet_ior(&lp, XAS_VLAN_MEMB_CTRL_REG);
+ /* Port VLAN Membership read data*/
+ data->port_vlan_mem_data = axienet_ior(&lp, XAS_VLAN_MEMB_DATA_REG);
+}
+
+/**
+ * get_memory_static_counter - get memory static counter values
+ * @data: Pointer which will return the counter values
+ */
+static void get_memory_static_counter(struct switch_data *data)
+{
+ data->mem_arr_cnt.cam_lookup.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_CAM_LOOKUP);
+ data->mem_arr_cnt.cam_lookup.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_CAM_LOOKUP + 0x4);
+
+ data->mem_arr_cnt.multicast_fr.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_MULTCAST);
+ data->mem_arr_cnt.multicast_fr.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_MULTCAST + 0x4);
+
+ data->mem_arr_cnt.err_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC1);
+ data->mem_arr_cnt.err_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC1 + 0x4);
+
+ data->mem_arr_cnt.err_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC2);
+ data->mem_arr_cnt.err_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC2 + 0x4);
+
+ data->mem_arr_cnt.sc_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_EP);
+ data->mem_arr_cnt.sc_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_EP + 0x4);
+ data->mem_arr_cnt.res_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_EP);
+ data->mem_arr_cnt.res_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_EP + 0x4);
+ data->mem_arr_cnt.be_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_EP);
+ data->mem_arr_cnt.be_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_sc_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_EP);
+ data->mem_arr_cnt.err_sc_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_res_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_EP);
+ data->mem_arr_cnt.err_res_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_be_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_EP);
+ data->mem_arr_cnt.err_be_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_EP + 0x4);
+
+ data->mem_arr_cnt.sc_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_EP);
+ data->mem_arr_cnt.sc_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_EP + 0x4);
+ data->mem_arr_cnt.res_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_EP);
+ data->mem_arr_cnt.res_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_EP + 0x4);
+ data->mem_arr_cnt.be_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_EP);
+ data->mem_arr_cnt.be_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_sc_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_EP);
+ data->mem_arr_cnt.err_sc_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_res_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_EP);
+ data->mem_arr_cnt.err_res_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_be_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_EP);
+ data->mem_arr_cnt.err_be_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_EP + 0x4);
+
+ data->mem_arr_cnt.sc_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC1);
+ data->mem_arr_cnt.sc_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.res_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC1);
+ data->mem_arr_cnt.res_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.be_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC1);
+ data->mem_arr_cnt.be_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_sc_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC1);
+ data->mem_arr_cnt.err_sc_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_res_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC1);
+ data->mem_arr_cnt.err_res_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_be_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC1);
+ data->mem_arr_cnt.err_be_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC1 + 0x4);
+
+ data->mem_arr_cnt.sc_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_MAC1);
+ data->mem_arr_cnt.sc_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.res_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_MAC1);
+ data->mem_arr_cnt.res_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.be_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_MAC1);
+ data->mem_arr_cnt.be_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_sc_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1);
+ data->mem_arr_cnt.err_sc_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_res_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1);
+ data->mem_arr_cnt.err_res_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_be_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1);
+ data->mem_arr_cnt.err_be_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1 + 0x4);
+
+ data->mem_arr_cnt.sc_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC2);
+ data->mem_arr_cnt.sc_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.res_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC2);
+ data->mem_arr_cnt.res_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.be_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC2);
+ data->mem_arr_cnt.be_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_sc_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC2);
+ data->mem_arr_cnt.err_sc_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_res_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC2);
+ data->mem_arr_cnt.err_res_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_be_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC2);
+ data->mem_arr_cnt.err_be_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC2 + 0x4);
+
+ data->mem_arr_cnt.sc_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_MAC2);
+ data->mem_arr_cnt.sc_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.res_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_MAC2);
+ data->mem_arr_cnt.res_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.be_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_MAC2);
+ data->mem_arr_cnt.be_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_sc_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2);
+ data->mem_arr_cnt.err_sc_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_res_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2);
+ data->mem_arr_cnt.err_res_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_be_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2);
+ data->mem_arr_cnt.err_be_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2 + 0x4);
+}
+
+static void add_delete_cam_entry(struct cam_struct data, u8 add)
+{
+ u32 port_action = 0;
+ u32 tv2 = 0;
+ u32 timeout = 20000;
+
+ /* wait for cam init done */
+ while (!(axienet_ior(&lp, XAS_SDL_CAM_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM init took longer time!!");
+ /* mac and vlan */
+ axienet_iow(&lp, XAS_SDL_CAM_KEY1_OFFSET,
+ (data.dest_addr[0] << 24) | (data.dest_addr[1] << 16) |
+ (data.dest_addr[2] << 8) | (data.dest_addr[3]));
+ axienet_iow(&lp, XAS_SDL_CAM_KEY2_OFFSET,
+ ((data.dest_addr[4] << 8) | data.dest_addr[5]) |
+ ((data.vlanid & SDL_CAM_VLAN_MASK) << SDL_CAM_VLAN_SHIFT));
+
+ /* TV 1 and TV 2 */
+ axienet_iow(&lp, XAS_SDL_CAM_TV1_OFFSET,
+ (data.src_addr[0] << 24) | (data.src_addr[1] << 16) |
+ (data.src_addr[2] << 8) | (data.src_addr[3]));
+
+ tv2 = ((data.src_addr[4] << 8) | data.src_addr[5]) |
+ ((data.tv_vlanid & SDL_CAM_VLAN_MASK) << SDL_CAM_VLAN_SHIFT);
+
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ tv2 = tv2 | ((data.ipv & SDL_CAM_IPV_MASK) << SDL_CAM_IPV_SHIFT)
+ | (data.en_ipv << SDL_EN_CAM_IPV_SHIFT);
+#endif
+ axienet_iow(&lp, XAS_SDL_CAM_TV2_OFFSET, tv2);
+
+ if (data.tv_en)
+ port_action = ((SDL_CAM_DEST_MAC_XLATION |
+ SDL_CAM_VLAN_ID_XLATION) << SDL_CAM_MAC_ACTION_LIST_SHIFT);
+
+ port_action = port_action | (data.fwd_port << SDL_CAM_PORT_LIST_SHIFT);
+
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI) || IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ port_action = port_action | (data.gate_id << SDL_GATEID_SHIFT);
+#endif
+
+ /* port action */
+ axienet_iow(&lp, XAS_SDL_CAM_PORT_ACT_OFFSET, port_action);
+
+ if (add)
+ axienet_iow(&lp, XAS_SDL_CAM_CTRL_OFFSET, SDL_CAM_ADD_ENTRY);
+ else
+ axienet_iow(&lp, XAS_SDL_CAM_CTRL_OFFSET, SDL_CAM_DELETE_ENTRY);
+
+ timeout = 20000;
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, XAS_SDL_CAM_CTRL_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM write took longer time!!");
+}
+
+static void port_vlan_mem_ctrl(u32 port_vlan_mem)
+{
+ axienet_iow(&lp, XAS_VLAN_MEMB_CTRL_REG, port_vlan_mem);
+}
+
+static long switch_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct switch_data data;
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ struct qci qci_data;
+#endif
+#if IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ struct cb cb_data;
+#endif
+ switch (cmd) {
+ case GET_STATUS_SWITCH:
+ /* Switch configurations */
+ get_switch_regs(&data);
+
+ /* Memory static counter*/
+ get_memory_static_counter(&data);
+ if (copy_to_user((char __user *)arg, &data, sizeof(data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case SET_STATUS_SWITCH:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_switch_regs(&data);
+ break;
+
+ case ADD_CAM_ENTRY:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ add_delete_cam_entry(data.cam_data, ADD);
+ break;
+
+ case DELETE_CAM_ENTRY:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ add_delete_cam_entry(data.cam_data, DELETE);
+ break;
+
+ case PORT_VLAN_MEM_CTRL:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ port_vlan_mem_ctrl(data.port_vlan_mem_ctrl);
+ break;
+
+ case SET_FRAME_TYPE_FIELD:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_frame_filter_opt(data.typefield.type1,
+ data.typefield.type2);
+ break;
+
+ case SET_MAC1_MNGMNT_Q_CONFIG:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_mac1_mngmntq(data.mac1_config);
+ break;
+
+ case SET_MAC2_MNGMNT_Q_CONFIG:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_mac2_mngmntq(data.mac2_config);
+ break;
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ case CONFIG_METER_MEM:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ program_meter_reg(qci_data.meter_config_data);
+ break;
+
+ case CONFIG_GATE_MEM:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ config_stream_filter(qci_data.stream_config_data);
+ break;
+
+ case PSFP_CONTROL:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ retval = -EINVAL;
+ pr_err("Copy from user failed\n");
+ goto end;
+ }
+ psfp_control(qci_data.psfp_config_data);
+ break;
+
+ case GET_STATIC_PSFP_COUNTER:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ get_psfp_static_counter(&qci_data.psfp_counter_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+ case GET_METER_REG:
+ get_meter_reg(&qci_data.meter_config_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+ case GET_STREAM_FLTR_CONFIG:
+ get_stream_filter_config(&qci_data.stream_config_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+#endif
+#if IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ case CONFIG_MEMBER_MEM:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ program_member_reg(cb_data.frer_memb_config_data);
+ break;
+
+ case CONFIG_INGRESS_FLTR:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ config_ingress_filter(cb_data.in_fltr_data);
+ break;
+
+ case FRER_CONTROL:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ frer_control(cb_data.frer_ctrl_data);
+ break;
+
+ case GET_STATIC_FRER_COUNTER:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ get_frer_static_counter(&cb_data.frer_counter_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case GET_MEMBER_REG:
+ get_member_reg(&cb_data.frer_memb_config_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case GET_INGRESS_FLTR:
+ get_ingress_filter_config(&cb_data.in_fltr_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+#endif
+ default:
+ retval = -ENOTTY;
+ break;
+ }
+end:
+ return retval;
+}
+
+static const struct file_operations switch_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = switch_ioctl,
+ .open = switch_open,
+ .release = switch_release,
+};
+
+static int tsn_switch_init(void)
+{
+ int ret;
+
+ switch_dev.minor = MISC_DYNAMIC_MINOR;
+ switch_dev.name = "switch";
+ switch_dev.fops = &switch_fops;
+ ret = misc_register(&switch_dev);
+ if (ret < 0) {
+ pr_err("Switch driver registration failed!\n");
+ return ret;
+ }
+
+ pr_debug("Xilinx TSN Switch driver initialized!\n");
+ return 0;
+}
+
+static int tsn_switch_cam_init(u16 num_q)
+{
+ u32 pmap = 0;
+ u32 timeout = 20000;
+
+ /* wait for switch init done */
+ while (!(axienet_ior(&lp, XAS_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("Switch init took longer time!!");
+
+ if (num_q == 3) {
+ /* map pcp = 2,3 to queue1
+ * pcp = 4 to queue2
+ */
+ pmap = ((PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY2_SHIFT) |
+ (PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY3_SHIFT) |
+ (PMAP_EGRESS_QUEUE2_SELECT << PMAP_PRIORITY4_SHIFT));
+ } else if (num_q == 2) {
+ /* pcp = 4 to queue1 */
+ pmap = (PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY4_SHIFT);
+ }
+
+ axienet_iow(&lp, XAS_PMAP_OFFSET, pmap);
+
+ timeout = 20000;
+ /* wait for cam init done */
+ while (!(axienet_ior(&lp, XAS_SDL_CAM_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM init took longer time!!");
+
+ return 0;
+}
+
+static int tsnswitch_probe(struct platform_device *pdev)
+{
+ struct resource *swt;
+ int ret;
+ u16 num_tc;
+
+ pr_info("TSN Switch probe\n");
+ /* Map device registers */
+ swt = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp.regs = devm_ioremap_resource(&pdev->dev, swt);
+ if (IS_ERR(lp.regs))
+ return PTR_ERR(lp.regs);
+
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &num_tc);
+ if (ret || (num_tc != 2 && num_tc != 3))
+ num_tc = XAE_MAX_TSN_TC;
+
+ pr_info("TSN Switch Initializing ....\n");
+ ret = tsn_switch_init();
+ if (ret)
+ return ret;
+ pr_info("TSN CAM Initializing ....\n");
+ ret = tsn_switch_cam_init(num_tc);
+
+ return ret;
+}
+
+static int tsnswitch_remove(struct platform_device *pdev)
+{
+ misc_deregister(&switch_dev);
+ return 0;
+}
+
+static struct platform_driver tsnswitch_driver = {
+ .probe = tsnswitch_probe,
+ .remove = tsnswitch_remove,
+ .driver = {
+ .name = "xilinx_tsnswitch",
+ .of_match_table = tsnswitch_of_match,
+ },
+};
+
+module_platform_driver(tsnswitch_driver);
+
+MODULE_DESCRIPTION("Xilinx TSN Switch driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h
new file mode 100644
index 000000000000..9e5e21aea127
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h
@@ -0,0 +1,364 @@
+/*
+ * Xilinx TSN core switch header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_SWITCH_H
+#define XILINX_TSN_SWITCH_H
+
+#include "xilinx_axienet.h"
+
+/* ioctls */
+#define GET_STATUS_SWITCH 0x16
+#define SET_STATUS_SWITCH 0x17
+#define ADD_CAM_ENTRY 0x18
+#define DELETE_CAM_ENTRY 0x19
+#define PORT_VLAN_MEM_CTRL 0x20
+#define SET_FRAME_TYPE_FIELD 0x21
+#define SET_MAC1_MNGMNT_Q_CONFIG 0x22
+#define SET_MAC2_MNGMNT_Q_CONFIG 0x23
+#define CONFIG_METER_MEM 0x24
+#define CONFIG_GATE_MEM 0x25
+#define PSFP_CONTROL 0x26
+#define GET_STATIC_PSFP_COUNTER 0x27
+#define GET_METER_REG 0x28
+#define GET_STREAM_FLTR_CONFIG 0x29
+#define CONFIG_MEMBER_MEM 0x2A
+#define CONFIG_INGRESS_FLTR 0x2B
+#define FRER_CONTROL 0x2C
+#define GET_STATIC_FRER_COUNTER 0x2D
+#define GET_MEMBER_REG 0x2E
+#define GET_INGRESS_FLTR 0x2F
+
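These command codes are raw numbers rather than _IOR()/_IOW()-encoded requests; switch_ioctl() in the driver compares them directly, so a userspace client passes them verbatim. A minimal, hypothetical sketch of querying the switch through the misc device (registered as /dev/switch by tsn_switch_init()), assuming this header is shared with userspace so that struct switch_data matches the kernel layout:

    /* Hypothetical userspace sketch, not part of the patch. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int read_switch_status(void)
    {
        struct switch_data data;
        int fd = open("/dev/switch", O_RDWR);

        if (fd < 0)
            return -1;
        /* GET_STATUS_SWITCH fills data via copy_to_user() */
        if (ioctl(fd, GET_STATUS_SWITCH, &data) < 0) {
            close(fd);
            return -1;
        }
        printf("switch status: 0x%x\n", data.switch_status);
        close(fd);
        return 0;
    }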
+/* Xilinx Axi Switch Offsets*/
+#define XAS_STATUS_OFFSET 0x00000
+#define XAS_CONTROL_OFFSET 0x00004
+#define XAS_PMAP_OFFSET 0x00008
+#define XAS_MAC_LSB_OFFSET 0x0000C
+#define XAS_MAC_MSB_OFFSET 0x00010
+#define XAS_EP2MAC_ST_FIFOT_OFFSET 0x00020
+#define XAS_EP2MAC_RE_FIFOT_OFFSET 0x00024
+#define XAS_EP2MAC_BE_FIFOT_OFFSET 0x00028
+#define XAS_MAC2MAC_ST_FIFOT_OFFSET 0x00030
+#define XAS_MAC2MAC_RE_FIFOT_OFFSET 0x00034
+#define XAS_MAC2MAC_BE_FIFOT_OFFSET 0x00038
+#define XAS_EP_PORT_VLAN_OFFSET 0x00040
+#define XAS_MAC_PORT_VLAN_OFFSET 0x00044
+#define XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET 0x00050
+#define XAS_MAC2_MNG_Q_OPTION_OFFSET 0x00054
+#define XAS_MAC1_MNG_Q_OPTION_OFFSET 0x00058
+#define XAS_ST_MAX_FRAME_SIZE_OFFSET 0x00060
+#define XAS_RE_MAX_FRAME_SIZE_OFFSET 0x00064
+#define XAS_BE_MAX_FRAME_SIZE_OFFSET 0x00068
+
+/* Memory static counters */
+#define XAS_MEM_STCNTR_CAM_LOOKUP 0x00400
+#define XAS_MEM_STCNTR_MULTCAST 0x00408
+#define XAS_MEM_STCNTR_ERR_MAC1 0x00410
+#define XAS_MEM_STCNTR_ERR_MAC2 0x00418
+#define XAS_MEM_STCNTR_SC_MAC1_EP 0x00420
+#define XAS_MEM_STCNTR_RES_MAC1_EP 0x00428
+#define XAS_MEM_STCNTR_BE_MAC1_EP 0x00430
+#define XAS_MEM_STCNTR_ERR_SC_MAC1_EP 0x00438
+#define XAS_MEM_STCNTR_ERR_RES_MAC1_EP 0x00440
+#define XAS_MEM_STCNTR_ERR_BE_MAC1_EP 0x00448
+#define XAS_MEM_STCNTR_SC_MAC2_EP 0x00458
+#define XAS_MEM_STCNTR_RES_MAC2_EP 0x00460
+#define XAS_MEM_STCNTR_BE_MAC2_EP 0x00468
+#define XAS_MEM_STCNTR_ERR_SC_MAC2_EP 0x00470
+#define XAS_MEM_STCNTR_ERR_RES_MAC2_EP 0x00478
+#define XAS_MEM_STCNTR_ERR_BE_MAC2_EP 0x00480
+#define XAS_MEM_STCNTR_SC_EP_MAC1 0x00490
+#define XAS_MEM_STCNTR_RES_EP_MAC1 0x00498
+#define XAS_MEM_STCNTR_BE_EP_MAC1 0x004A0
+#define XAS_MEM_STCNTR_ERR_SC_EP_MAC1 0x004A8
+#define XAS_MEM_STCNTR_ERR_RES_EP_MAC1 0x004B0
+#define XAS_MEM_STCNTR_ERR_BE_EP_MAC1 0x004B8
+#define XAS_MEM_STCNTR_SC_MAC2_MAC1 0x004C0
+#define XAS_MEM_STCNTR_RES_MAC2_MAC1 0x004C8
+#define XAS_MEM_STCNTR_BE_MAC2_MAC1 0x004D0
+#define XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1 0x004D8
+#define XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1 0x004E0
+#define XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1 0x004E8
+#define XAS_MEM_STCNTR_SC_EP_MAC2 0x004F0
+#define XAS_MEM_STCNTR_RES_EP_MAC2 0x004F8
+#define XAS_MEM_STCNTR_BE_EP_MAC2 0x00500
+#define XAS_MEM_STCNTR_ERR_SC_EP_MAC2 0x00508
+#define XAS_MEM_STCNTR_ERR_RES_EP_MAC2 0x00510
+#define XAS_MEM_STCNTR_ERR_BE_EP_MAC2 0x00518
+#define XAS_MEM_STCNTR_SC_MAC1_MAC2 0x00520
+#define XAS_MEM_STCNTR_RES_MAC1_MAC2 0x00528
+#define XAS_MEM_STCNTR_BE_MAC1_MAC2 0x00530
+#define XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2 0x00538
+#define XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2 0x00540
+#define XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2 0x00548
+
+/* Stream Destination Lookup CAM */
+#define XAS_SDL_CAM_CTRL_OFFSET 0x1000
+#define XAS_SDL_CAM_STATUS_OFFSET 0x1004
+#define XAS_SDL_CAM_KEY1_OFFSET 0x1008
+#define XAS_SDL_CAM_KEY2_OFFSET 0x100C
+#define XAS_SDL_CAM_TV1_OFFSET 0x1010
+#define XAS_SDL_CAM_TV2_OFFSET 0x1014
+#define XAS_SDL_CAM_PORT_ACT_OFFSET 0x1018
+
+/* Port VLAN Membership Memory */
+#define XAS_VLAN_MEMB_CTRL_REG 0x1100
+#define XAS_VLAN_MEMB_DATA_REG 0x1104
+
+/* QCI */
+#define PSFP_CONTROL_OFFSET 0x1200
+#define STREAM_FILTER_CONFIG_OFFSET 0x1204
+#define STREAM_METER_CIR_OFFSET 0x1208
+#define STREAM_METER_EIR_OFFSET 0x120C
+#define STREAM_METER_CBR_OFFSET 0x1210
+#define STREAM_METER_EBR_OFFSET 0x1214
+
+/* PSFP Statistics Counters */
+#define TOTAL_PSFP_FRAMES_OFFSET 0x2000
+#define FLTR_INGS_PORT_ERR_OFFSET 0x2800
+#define FLTR_STDU_ERR_OFFSET 0x3000
+#define METER_ERR_OFFSET 0x3800
+
+/* CB */
+#define FRER_CONTROL_OFFSET 0x1300
+#define INGRESS_FILTER_OFFSET 0x1304
+#define FRER_CONFIG_REG1 0x1308
+#define FRER_CONFIG_REG2 0x130C
+
+/* FRER Statistics Counters */
+#define TOTAL_FRER_FRAMES_OFFSET 0x4000
+#define FRER_DISCARD_INGS_FLTR_OFFSET 0x4800
+#define FRER_PASS_FRAMES_INDV_OFFSET 0x5000
+#define FRER_DISCARD_FRAMES_INDV_OFFSET 0x5800
+#define FRER_PASS_FRAMES_SEQ_OFFSET 0x6000
+#define FRER_DISCARD_FRAMES_SEQ_OFFSET 0x6800
+#define FRER_ROGUE_FRAMES_SEQ_OFFSET 0x7000
+#define SEQ_RECV_RESETS_OFFSET 0x7800
+
+/* 64-bit counter */
+struct static_cntr {
+ u32 msb;
+ u32 lsb;
+};
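get_memory_static_counter() in the driver reads every counter as two separate 32-bit halves (lsb at the base offset, msb at base + 0x4). A consumer would fold them back into a single value along these lines; the helper name is ours, and since the halves are not read atomically, a counter rolling over between the two reads can yield a torn value:

    /* Hypothetical helper: combine the halves of a static counter. */
    static inline u64 static_cntr_to_u64(const struct static_cntr *c)
    {
        return ((u64)c->msb << 32) | c->lsb;
    }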
+
+/*********** QCI Structures **************/
+struct psfp_config {
+ u8 gate_id;
+ u8 meter_id;
+ bool en_meter;
+ bool allow_stream;
+ bool en_psfp;
+ u8 wr_op_type;
+ bool op_type;
+};
+
+struct meter_config {
+ u32 cir;
+ u32 eir;
+ u32 cbr;
+ u32 ebr;
+ u8 mode;
+};
+
+struct stream_filter {
+ u8 in_pid; /* ingress port id*/
+ u16 max_fr_size; /* max frame size*/
+};
+
+/* PSFP Static counter*/
+struct psfp_static_counter {
+ struct static_cntr psfp_fr_count;
+ struct static_cntr err_filter_ins_port;
+ struct static_cntr err_filtr_sdu;
+ struct static_cntr err_meter;
+ unsigned char num;
+};
+
+/* QCI core structures */
+struct qci {
+ struct meter_config meter_config_data;
+ struct stream_filter stream_config_data;
+ struct psfp_config psfp_config_data;
+ struct psfp_static_counter psfp_counter_data;
+};
+
+/************* QCI Structures end *************/
+
+/*********** CB Structures **************/
+struct frer_ctrl {
+ u8 gate_id;
+ u8 memb_id;
+ bool seq_reset;
+ bool gate_state;
+ bool rcvry_tmout;
+ bool frer_valid;
+ u8 wr_op_type;
+ bool op_type;
+};
+
+struct in_fltr {
+ u8 in_port_id;
+ u16 max_seq_id;
+};
+
+struct frer_memb_config {
+ u8 seq_rec_hist_len;
+ u8 split_strm_egport_id;
+ u16 split_strm_vlan_id;
+ u32 rem_ticks;
+};
+
+/* FRER Static counter*/
+struct frer_static_counter {
+ struct static_cntr frer_fr_count;
+ struct static_cntr disc_frames_in_portid;
+ struct static_cntr pass_frames_seq_recv;
+ struct static_cntr disc_frames_seq_recv;
+ struct static_cntr rogue_frames_seq_recv;
+ struct static_cntr pass_frames_ind_recv;
+ struct static_cntr disc_frames_ind_recv;
+ struct static_cntr seq_recv_rst;
+ unsigned char num;
+};
+
+/* CB core structures */
+struct cb {
+ struct frer_ctrl frer_ctrl_data;
+ struct in_fltr in_fltr_data;
+ struct frer_memb_config frer_memb_config_data;
+ struct frer_static_counter frer_counter_data;
+};
+
+/************* CB Structures end *************/
+
+/********* Switch Structures Starts ***********/
+struct thershold {
+ u16 t1;
+ u16 t2;
+};
+
+/* memory static counters */
+struct mem_static_arr_cntr {
+ struct static_cntr cam_lookup;
+ struct static_cntr multicast_fr;
+ struct static_cntr err_mac1;
+ struct static_cntr err_mac2;
+ struct static_cntr sc_mac1_ep;
+ struct static_cntr res_mac1_ep;
+ struct static_cntr be_mac1_ep;
+ struct static_cntr err_sc_mac1_ep;
+ struct static_cntr err_res_mac1_ep;
+ struct static_cntr err_be_mac1_ep;
+ struct static_cntr sc_mac2_ep;
+ struct static_cntr res_mac2_ep;
+ struct static_cntr be_mac2_ep;
+ struct static_cntr err_sc_mac2_ep;
+ struct static_cntr err_res_mac2_ep;
+ struct static_cntr err_be_mac2_ep;
+ struct static_cntr sc_ep_mac1;
+ struct static_cntr res_ep_mac1;
+ struct static_cntr be_ep_mac1;
+ struct static_cntr err_sc_ep_mac1;
+ struct static_cntr err_res_ep_mac1;
+ struct static_cntr err_be_ep_mac1;
+ struct static_cntr sc_mac2_mac1;
+ struct static_cntr res_mac2_mac1;
+ struct static_cntr be_mac2_mac1;
+ struct static_cntr err_sc_mac2_mac1;
+ struct static_cntr err_res_mac2_mac1;
+ struct static_cntr err_be_mac2_mac1;
+ struct static_cntr sc_ep_mac2;
+ struct static_cntr res_ep_mac2;
+ struct static_cntr be_ep_mac2;
+ struct static_cntr err_sc_ep_mac2;
+ struct static_cntr err_res_ep_mac2;
+ struct static_cntr err_be_ep_mac2;
+ struct static_cntr sc_mac1_mac2;
+ struct static_cntr res_mac1_mac2;
+ struct static_cntr be_mac1_mac2;
+ struct static_cntr err_sc_mac1_mac2;
+ struct static_cntr err_res_mac1_mac2;
+ struct static_cntr err_be_mac1_mac2;
+};
+
+/* CAM structure */
+struct cam_struct {
+ u8 src_addr[6];
+ u8 dest_addr[6];
+ u16 vlanid;
+ u16 tv_vlanid;
+ u8 fwd_port;
+ bool tv_en;
+ u8 gate_id;
+ u8 ipv;
+ bool en_ipv;
+};
+
+/*Frame Filtering Type Field Option */
+struct ff_type {
+ u16 type1;
+ u16 type2;
+};
+
+/* Core switch structure*/
+struct switch_data {
+ u32 switch_status;
+ u32 switch_ctrl;
+ u32 switch_prt;
+ u8 sw_mac_addr[6];
+ /*0 - schedule, 1 - reserved, 2 - best effort queue*/
+ struct thershold thld_ep_mac[3];
+ struct thershold thld_mac_mac[3];
+ u32 ep_vlan;
+ u32 mac_vlan;
+ u32 max_frame_sc_que;
+ u32 max_frame_res_que;
+ u32 max_frame_be_que;
+ /* Memory counters */
+ struct mem_static_arr_cntr mem_arr_cnt;
+ /* CAM */
+ struct cam_struct cam_data;
+/* Frame Filtering Type Field Option */
+ struct ff_type typefield;
+/* MAC Port-1 Management Queueing Options */
+ int mac1_config;
+/* MAC Port-2 Management Queueing Options */
+ int mac2_config;
+/* Port VLAN Membership Registers */
+ int port_vlan_mem_ctrl;
+ char port_vlan_mem_data;
+};
+
+/********* Switch Structures ends ***********/
+
+extern struct axienet_local lp;
+
+/********* qci function declarations ********/
+void psfp_control(struct psfp_config data);
+void config_stream_filter(struct stream_filter data);
+void program_meter_reg(struct meter_config data);
+void get_psfp_static_counter(struct psfp_static_counter *data);
+void get_meter_reg(struct meter_config *data);
+void get_stream_filter_config(struct stream_filter *data);
+
+/********* cb function declarations ********/
+void frer_control(struct frer_ctrl data);
+void get_ingress_filter_config(struct in_fltr *data);
+void config_ingress_filter(struct in_fltr data);
+void get_member_reg(struct frer_memb_config *data);
+void program_member_reg(struct frer_memb_config data);
+void get_frer_static_counter(struct frer_static_counter *data);
+#endif /* XILINX_TSN_SWITCH_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h b/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h
new file mode 100644
index 000000000000..4bb74e78d89a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h
@@ -0,0 +1,73 @@
+/*
+ * Xilinx FPGA Xilinx TSN timer module header.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_TSN_H_
+#define _XILINX_TSN_H_
+
+#include <linux/platform_device.h>
+
+#define XAE_RTC_OFFSET 0x12800
+/* RTC Nanoseconds Field Offset Register */
+#define XTIMER1588_RTC_OFFSET_NS 0x00000
+/* RTC Seconds Field Offset Register - Low */
+#define XTIMER1588_RTC_OFFSET_SEC_L 0x00008
+/* RTC Seconds Field Offset Register - High */
+#define XTIMER1588_RTC_OFFSET_SEC_H 0x0000C
+/* RTC Increment */
+#define XTIMER1588_RTC_INCREMENT 0x00010
+/* Current TOD Nanoseconds - RO */
+#define XTIMER1588_CURRENT_RTC_NS 0x00014
+/* Current TOD Seconds -Low RO */
+#define XTIMER1588_CURRENT_RTC_SEC_L 0x00018
+/* Current TOD Seconds -High RO */
+#define XTIMER1588_CURRENT_RTC_SEC_H 0x0001C
+#define XTIMER1588_SYNTONIZED_NS 0x0002C
+#define XTIMER1588_SYNTONIZED_SEC_L 0x00030
+#define XTIMER1588_SYNTONIZED_SEC_H 0x00034
+/* Write to Bit 0 to clear the interrupt */
+#define XTIMER1588_INTERRUPT 0x00020
+/* 8kHz Pulse Offset Register */
+#define XTIMER1588_8KPULSE 0x00024
+/* Correction Field - Low */
+#define XTIMER1588_CF_L 0x0002C
+/* Correction Field - High */
+#define XTIMER1588_CF_H 0x00030
+
+#define XTIMER1588_RTC_MASK ((1 << 26) - 1)
+#define XTIMER1588_INT_SHIFT 0
+#define NANOSECOND_BITS 20
+#define NANOSECOND_MASK ((1 << NANOSECOND_BITS) - 1)
+#define SECOND_MASK ((1 << (32 - NANOSECOND_BITS)) - 1)
+#define XTIMER1588_RTC_INCREMENT_SHIFT 20
+#define PULSESIN1PPS 128
+
+/* Read/Write access to the registers */
+#ifndef out_be32
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+#define in_be32(offset) __raw_readl(offset)
+#define out_be32(offset, val) __raw_writel(val, offset)
+#endif
+#endif
+
+/* The tsn ptp module will set this variable */
+extern int axienet_phc_index;
+
+void *axienet_ptp_timer_probe(void __iomem *base,
+ struct platform_device *pdev);
+int axienet_ptp_timer_remove(void *priv);
+int axienet_get_phc_index(void *priv);
+#endif
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1d406c6df790..ea50f17d4cbc 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -456,6 +456,11 @@ config VITESSE_PHY
---help---
Currently supports the vsc8244
+config XILINX_PHY
+ tristate "Drivers for xilinx PHYs"
+ ---help---
+ This module provides a driver for the Xilinx PCS/PMA Core.
+
config XILINX_GMII2RGMII
tristate "Xilinx GMII2RGMII converter driver"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 5b5c8669499e..d2f2389688ae 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -90,4 +90,5 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_XILINX_PHY) += xilinx_phy.o
obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6eaec42fec05..f00605fdee84 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -21,11 +21,13 @@
#define MII_DP83867_PHYCTRL 0x10
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
+#define MII_DP83867_CFG2 0x14
+#define MII_DP83867_BISCR 0x16
#define DP83867_CTRL 0x1f
#define DP83867_CFG3 0x1e
/* Extended Registers */
-#define DP83867_CFG4 0x0031
+#define DP83867_CFG4 0x0031
#define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
#define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5)
#define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5)
@@ -38,6 +40,7 @@
#define DP83867_IO_MUX_CFG 0x0170
#define DP83867_10M_SGMII_CFG 0x016F
#define DP83867_10M_SGMII_RATE_ADAPT_MASK BIT(7)
+#define DP83867_SGMIITYPE 0x00D3
#define DP83867_SW_RESET BIT(15)
#define DP83867_SW_RESTART BIT(14)
@@ -66,12 +69,35 @@
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
+#define DP83867_MDI_CROSSOVER 5
+#define DP83867_MDI_CROSSOVER_AUTO 0b10
+#define DP83867_MDI_CROSSOVER_MDIX 0b01
+#define DP83867_PHYCTRL_SGMIIEN 0x0800
+#define DP83867_PHYCTRL_RXFIFO_SHIFT 12
+#define DP83867_PHYCTRL_TXFIFO_SHIFT 14
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+/* CFG2 bits */
+#define MII_DP83867_CFG2_SPEEDOPT_10EN 0x0040
+#define MII_DP83867_CFG2_SGMII_AUTONEGEN 0x0080
+#define MII_DP83867_CFG2_SPEEDOPT_ENH 0x0100
+#define MII_DP83867_CFG2_SPEEDOPT_CNT 0x0800
+#define MII_DP83867_CFG2_SPEEDOPT_INTLOW 0x2000
+#define MII_DP83867_CFG2_MASK 0x003F
+
+/* CFG4 bits */
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_MASK 0x60
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_16MS 0x00
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_2US 0x20
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_800US 0x40
+#define DP83867_CFG4_SGMII_AUTONEG_TIMER_11MS 0x60
+#define DP83867_CFG4_RESVDBIT7 BIT(7)
+#define DP83867_CFG4_RESVDBIT8 BIT(8)
+
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f
@@ -86,6 +112,8 @@
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
+/* SGMIICTL1 bits */
+#define DP83867_SGMIICLK_EN BIT(14)
enum {
DP83867_PORT_MIRROING_KEEP,
@@ -101,6 +129,7 @@ struct dp83867_private {
int port_mirroring;
bool rxctrl_strap_quirk;
int clk_output_sel;
+ bool wiremode_6;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -201,6 +230,8 @@ static int dp83867_of_init(struct phy_device *phydev)
if (of_property_read_bool(of_node, "enet-phy-lane-no-swap"))
dp83867->port_mirroring = DP83867_PORT_MIRROING_DIS;
+ dp83867->wiremode_6 = of_property_read_bool(of_node, "ti,6-wire-mode");
+
return of_property_read_u32(of_node, "ti,fifo-depth",
&dp83867->fifo_depth);
}
@@ -214,8 +245,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret, val, bs;
- u16 delay;
+ int ret, bs;
+ u16 val, delay, cfg2;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -232,11 +263,19 @@ static int dp83867_config_init(struct phy_device *phydev)
}
/* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */
- if (dp83867->rxctrl_strap_quirk)
- phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
- BIT(7));
+ if (dp83867->rxctrl_strap_quirk) {
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4);
+ val &= ~BIT(7);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val);
+ }
if (phy_interface_is_rgmii(phydev)) {
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+ (DP83867_MDI_CROSSOVER_AUTO << DP83867_MDI_CROSSOVER) |
+ (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ if (ret)
+ return ret;
+
val = phy_read(phydev, MII_DP83867_PHYCTRL);
if (val < 0)
return val;
@@ -261,32 +300,73 @@ static int dp83867_config_init(struct phy_device *phydev)
if (ret)
return ret;
- /* Set up RGMII delays */
- val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+ } else {
+ /* Set SGMIICTL1 6-wire mode */
+ if (dp83867->wiremode_6)
+ phy_write_mmd(phydev, DP83867_DEVADDR,
+ DP83867_SGMIITYPE, DP83867_SGMIICLK_EN);
+
+ phy_write(phydev, MII_BMCR,
+ (BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000));
+
+ cfg2 = phy_read(phydev, MII_DP83867_CFG2);
+ cfg2 &= MII_DP83867_CFG2_MASK;
+ cfg2 |= (MII_DP83867_CFG2_SPEEDOPT_10EN |
+ MII_DP83867_CFG2_SGMII_AUTONEGEN |
+ MII_DP83867_CFG2_SPEEDOPT_ENH |
+ MII_DP83867_CFG2_SPEEDOPT_CNT |
+ MII_DP83867_CFG2_SPEEDOPT_INTLOW);
+ phy_write(phydev, MII_DP83867_CFG2, cfg2);
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, 0x0);
+
+ phy_write(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCTRL_SGMIIEN |
+ (DP83867_MDI_CROSSOVER_MDIX << DP83867_MDI_CROSSOVER) |
+ (dp83867->fifo_depth << DP83867_PHYCTRL_RXFIFO_SHIFT) |
+ (dp83867->fifo_depth << DP83867_PHYCTRL_TXFIFO_SHIFT));
+ phy_write(phydev, MII_DP83867_BISCR, 0x0);
+
+ /* This is a SW workaround for link instability if
+ * RX_CTRL is not strapped to mode 3 or 4 in HW.
+ */
+ if (dp83867->rxctrl_strap_quirk) {
+ val = phy_read_mmd(phydev, DP83867_DEVADDR,
+ DP83867_CFG4);
+ val &= ~DP83867_CFG4_RESVDBIT7;
+ val |= DP83867_CFG4_RESVDBIT8;
+ val &= ~DP83867_CFG4_SGMII_AUTONEG_TIMER_MASK;
+ val |= DP83867_CFG4_SGMII_AUTONEG_TIMER_11MS;
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ val);
+ }
+ }
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+ /* Set up RGMII delays */
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ val |= DP83867_RGMII_TX_CLK_DELAY_EN;
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ val |= DP83867_RGMII_RX_CLK_DELAY_EN;
- delay = (dp83867->rx_id_delay |
- (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, val);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
- delay);
+ delay = (dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
- if (dp83867->io_impedance >= 0)
- phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
- dp83867->io_impedance &
- DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
- }
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL,
+ delay);
+
+ if (dp83867->io_impedance >= 0)
+ phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG,
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83867->io_impedance &
+ DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
/* For support SPEED_10 in SGMII mode
diff --git a/drivers/net/phy/xilinx_phy.c b/drivers/net/phy/xilinx_phy.c
new file mode 100644
index 000000000000..2410fa3a59ad
--- /dev/null
+++ b/drivers/net/phy/xilinx_phy.c
@@ -0,0 +1,160 @@
+/* Xilinx PCS/PMA Core phy driver
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for PCS/PMA Core.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/xilinx_phy.h>
+
+#define MII_PHY_STATUS_SPD_MASK 0x0C00
+#define MII_PHY_STATUS_FULLDUPLEX 0x1000
+#define MII_PHY_STATUS_1000 0x0800
+#define MII_PHY_STATUS_100 0x0400
+#define XPCSPMA_PHY_CTRL_ISOLATE_DISABLE 0xFBFF
+
+static int xilinxphy_read_status(struct phy_device *phydev)
+{
+ int err;
+ int status = 0;
+
+ /* Update the link, but return if there
+ * was an error
+ */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ status = phy_read(phydev, MII_LPA);
+
+ if (status & MII_PHY_STATUS_FULLDUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ switch (status & MII_PHY_STATUS_SPD_MASK) {
+ case MII_PHY_STATUS_1000:
+ phydev->speed = SPEED_1000;
+ break;
+
+ case MII_PHY_STATUS_100:
+ phydev->speed = SPEED_100;
+ break;
+
+ default:
+ phydev->speed = SPEED_10;
+ break;
+ }
+ } else {
+ int bmcr = phy_read(phydev, MII_BMCR);
+
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (bmcr & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (bmcr & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+ }
+
+ /* For 1000BASE-X PHY mode the speed/duplex will always be
+ * 1000Mbps/full duplex
+ */
+ if (phydev->dev_flags == XAE_PHY_TYPE_1000BASE_X) {
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ }
+
+ /* For 2500BASE-X PHY mode the speed/duplex will always be
+ * 2500Mbps/full duplex
+ */
+ if (phydev->dev_flags == XAE_PHY_TYPE_2500) {
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_2500;
+ }
+
+ return 0;
+}
+
+static int xilinxphy_of_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ u32 phytype;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (!of_node)
+ return -ENODEV;
+
+ if (!of_property_read_u32(of_node, "xlnx,phy-type", &phytype)) {
+ if (phytype == XAE_PHY_TYPE_1000BASE_X)
+ phydev->dev_flags |= XAE_PHY_TYPE_1000BASE_X;
+ if (phytype == XAE_PHY_TYPE_2500)
+ phydev->dev_flags |= XAE_PHY_TYPE_2500;
+ }
+
+ return 0;
+}
+
+static int xilinxphy_config_init(struct phy_device *phydev)
+{
+ int temp;
+
+ xilinxphy_of_init(phydev);
+ temp = phy_read(phydev, MII_BMCR);
+ temp &= XPCSPMA_PHY_CTRL_ISOLATE_DISABLE;
+ phy_write(phydev, MII_BMCR, temp);
+
+ return 0;
+}
+
+static struct phy_driver xilinx_drivers[] = {
+ {
+ .phy_id = XILINX_PHY_ID,
+ .phy_id_mask = XILINX_PHY_ID_MASK,
+ .name = "Xilinx PCS/PMA PHY",
+ .features = PHY_GBIT_FEATURES,
+ .config_init = &xilinxphy_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &xilinxphy_read_status,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ },
+};
+
+module_phy_driver(xilinx_drivers);
+
+static struct mdio_device_id __maybe_unused xilinx_tbl[] = {
+ { XILINX_PHY_ID, XILINX_PHY_ID_MASK },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, xilinx_tbl);
+MODULE_DESCRIPTION("Xilinx PCS/PMA PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 5893543918c8..4de3a1ba6eec 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2017 - 2019 Xilinx, Inc.
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -10,40 +11,163 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define SILICON_REVISION_MASK 0xF
+#define WORD_INBYTES (4)
+#define SOC_VER_SIZE (0x4)
+#define EFUSE_MEMORY_SIZE (0xF4)
+#define UNUSED_SPACE (0x8)
+#define ZYNQMP_NVMEM_SIZE (SOC_VER_SIZE + UNUSED_SPACE + \
+ EFUSE_MEMORY_SIZE)
+#define SOC_VERSION_OFFSET (0x0)
+#define EFUSE_START_OFFSET (0xC)
+#define EFUSE_END_OFFSET (0xFC)
+#define EFUSE_NOT_ENABLED (29)
+#define EFUSE_READ (0)
+#define EFUSE_WRITE (1)
-struct zynqmp_nvmem_data {
- struct device *dev;
- struct nvmem_device *nvmem;
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct xilinx_efuse - efuse access request passed to the firmware
+ * @src: DMA address of the buffer holding the data to be written/read
+ * @size: number of words to read/write
+ * @offset: offset to read/write
+ * @flag: 0 - efuse read, 1 - efuse write
+ * @fullmap: full efuse map access flag (not set by this driver)
+ *
+ * This structure stores all the details required to
+ * read/write the efuse memory.
+ */
+struct xilinx_efuse {
+ u64 src;
+ u32 size;
+ u32 offset;
+ u32 flag;
+ u32 fullmap;
};
-static const struct zynqmp_eemi_ops *eemi_ops;
+static int zynqmp_efuse_access(void *context, unsigned int offset,
+ void *val, size_t bytes, unsigned int flag)
+{
+ size_t words = bytes / WORD_INBYTES;
+ struct device *dev = context;
+ dma_addr_t dma_addr, dma_buf;
+ struct xilinx_efuse *efuse;
+ char *data;
+ int ret;
+
+ if (!eemi_ops->efuse_access)
+ return -ENXIO;
+
+ if (bytes % WORD_INBYTES != 0) {
+ dev_err(dev,
+ "ERROR: Bytes requested should be word aligned\n\r");
+ return -ENOTSUPP;
+ }
+ if (offset % WORD_INBYTES != 0) {
+ dev_err(dev,
+ "ERROR: offset requested should be word aligned\n\r");
+ return -ENOTSUPP;
+ }
+
+ efuse = dma_alloc_coherent(dev, sizeof(struct xilinx_efuse),
+ &dma_addr, GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ data = dma_alloc_coherent(dev, bytes, &dma_buf, GFP_KERNEL);
+ if (!data) {
+ dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+ efuse, dma_addr);
+ return -ENOMEM;
+ }
+
+ if (flag == EFUSE_WRITE) {
+ memcpy(data, val, bytes);
+ efuse->flag = EFUSE_WRITE;
+ } else {
+ efuse->flag = EFUSE_READ;
+ }
+
+ efuse->src = dma_buf;
+ efuse->size = words;
+ efuse->offset = offset;
+
+ eemi_ops->efuse_access(dma_addr, &ret);
+ if (ret != 0) {
+ if (ret == EFUSE_NOT_ENABLED) {
+ dev_err(dev, "ERROR: efuse access is not enabled\n\r");
+ ret = -ENOTSUPP;
+ goto END;
+ }
+ dev_err(dev, "ERROR: in efuse read %x\n\r", ret);
+ ret = -EPERM;
+ goto END;
+ }
+
+ if (flag == EFUSE_READ)
+ memcpy(val, data, bytes);
+END:
+
+ dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+ efuse, dma_addr);
+ dma_free_coherent(dev, bytes, data, dma_buf);
+
+ return ret;
+}
static int zynqmp_nvmem_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+ void *val, size_t bytes)
{
int ret;
int idcode, version;
- struct zynqmp_nvmem_data *priv = context;
if (!eemi_ops->get_chipid)
return -ENXIO;
- ret = eemi_ops->get_chipid(&idcode, &version);
- if (ret < 0)
- return ret;
+ switch (offset) {
+ /* Soc version offset is zero */
+ case SOC_VERSION_OFFSET:
+ if (bytes != SOC_VER_SIZE)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+ break;
+ /* Efuse offset starts from 0xc */
+ case EFUSE_START_OFFSET ... EFUSE_END_OFFSET:
+ ret = zynqmp_efuse_access(context, offset, val,
+ bytes, EFUSE_READ);
+ break;
+ default:
+ /* unused space between SoC version and efuse reads back poison */
+ *(u32 *)val = 0xDEADBEEF;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
- dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
- *(int *)val = version & SILICON_REVISION_MASK;
+static int zynqmp_nvmem_write(void *context,
+ unsigned int offset, void *val, size_t bytes)
+{
+ /* Efuse offset starts from 0xc */
+ if (offset < EFUSE_START_OFFSET)
+ return -ENOTSUPP;
- return 0;
+ return zynqmp_efuse_access(context, offset, val, bytes, EFUSE_WRITE);
}
static struct nvmem_config econfig = {
.name = "zynqmp-nvmem",
.owner = THIS_MODULE,
.word_size = 1,
- .size = 1,
- .read_only = true,
+ .size = ZYNQMP_NVMEM_SIZE,
};
static const struct of_device_id zynqmp_nvmem_match[] = {
@@ -54,29 +178,37 @@ MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
static int zynqmp_nvmem_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct zynqmp_nvmem_data *priv;
-
- priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct nvmem_device *nvmem;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
return PTR_ERR(eemi_ops);
- priv->dev = dev;
- econfig.dev = dev;
+ econfig.dev = &pdev->dev;
+ econfig.priv = &pdev->dev;
econfig.reg_read = zynqmp_nvmem_read;
- econfig.priv = priv;
+ econfig.reg_write = zynqmp_nvmem_write;
+
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- priv->nvmem = devm_nvmem_register(dev, &econfig);
+ platform_set_drvdata(pdev, nvmem);
- return PTR_ERR_OR_ZERO(priv->nvmem);
+ return 0;
+}
+
+static int zynqmp_nvmem_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ nvmem_unregister(nvmem);
+ return 0;
}
static struct platform_driver zynqmp_nvmem_driver = {
.probe = zynqmp_nvmem_probe,
+ .remove = zynqmp_nvmem_remove,
.driver = {
.name = "zynqmp-nvmem",
.of_match_table = zynqmp_nvmem_match,
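With this layout, both the SoC version and the efuse array are reachable from userspace through the nvmem sysfs file. A hedged sketch follows; the sysfs path is an assumption (it depends on registration order), and the 4-byte read at offset 0x0 must be exactly SOC_VER_SIZE or zynqmp_nvmem_read() returns -ENOTSUPP:

    /* Illustrative only: the sysfs path is assumed, not guaranteed. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int read_soc_version(void)
    {
        uint32_t ver;
        int fd = open("/sys/bus/nvmem/devices/zynqmp-nvmem0/nvmem", O_RDONLY);

        if (fd < 0)
            return -1;
        /* offset 0x0 is routed to get_chipid(); 0xC..0xFC hits the efuse */
        if (pread(fd, &ver, sizeof(ver), 0x0) != (ssize_t)sizeof(ver)) {
            close(fd);
            return -1;
        }
        printf("silicon revision: %u\n", ver);
        close(fd);
        return 0;
    }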
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d91618641be6..e96c312216db 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -103,6 +103,13 @@ config OF_OVERLAY
config OF_NUMA
bool
+config OF_CONFIGFS
+ bool "Device Tree Overlay ConfigFS interface"
+ select CONFIGFS_FS
+ depends on OF_OVERLAY
+ help
+ Enable a simple user-space driven DT overlay interface.
+
config OF_DMA_DEFAULT_COHERENT
# arches should select this if DMA is coherent by default for OF devices
bool
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 663a4af0cccd..7f43d8b95017 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y = base.o device.o platform.o property.o
+obj-$(CONFIG_OF_CONFIGFS) += configfs.o
obj-$(CONFIG_OF_KOBJ) += kobj.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
diff --git a/drivers/of/configfs.c b/drivers/of/configfs.c
new file mode 100644
index 000000000000..f18f7d5a8146
--- /dev/null
+++ b/drivers/of/configfs.c
@@ -0,0 +1,293 @@
+/*
+ * Configfs entries for device-tree
+ *
+ * Copyright (C) 2013 - Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/spinlock.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/limits.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+
+#include "of_private.h"
+
+struct cfs_overlay_item {
+ struct config_item item;
+
+ char path[PATH_MAX];
+
+ const struct firmware *fw;
+ struct device_node *overlay;
+ int ov_id;
+
+ void *dtbo;
+ int dtbo_size;
+
+ void *mem;
+};
+
+static DEFINE_MUTEX(overlay_lock);
+
+static int create_overlay(struct cfs_overlay_item *overlay, void *blob)
+{
+ int err;
+
+ /* FIXME */
+ err = of_overlay_fdt_apply(blob, overlay->dtbo_size, &overlay->ov_id);
+ if (err < 0) {
+ pr_err("%s: Failed to create overlay (err=%d)\n",
+ __func__, err);
+ return err;
+ }
+
+ return err;
+}
+
+static inline struct cfs_overlay_item *to_cfs_overlay_item(
+ struct config_item *item)
+{
+ return item ? container_of(item, struct cfs_overlay_item, item) : NULL;
+}
+
+static ssize_t cfs_overlay_item_path_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%s\n", to_cfs_overlay_item(item)->path);
+}
+
+static ssize_t cfs_overlay_item_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+ const char *p = page;
+ char *s;
+ int err;
+
+ /* if it's set do not allow changes */
+ if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+ return -EPERM;
+
+ /* copy to path buffer (and make sure it's always zero terminated) */
+ count = snprintf(overlay->path, sizeof(overlay->path) - 1, "%s", p);
+ overlay->path[sizeof(overlay->path) - 1] = '\0';
+
+ /* strip trailing newlines */
+ s = overlay->path + strlen(overlay->path);
+ while (s > overlay->path && *--s == '\n')
+ *s = '\0';
+
+ pr_debug("%s: path is '%s'\n", __func__, overlay->path);
+
+ err = request_firmware(&overlay->fw, overlay->path, NULL);
+ if (err != 0)
+ goto out_err;
+
+ overlay->dtbo_size = overlay->fw->size;
+ err = create_overlay(overlay, (void *)overlay->fw->data);
+ if (err < 0)
+ goto out_err;
+
+ return count;
+
+out_err:
+
+ release_firmware(overlay->fw);
+ overlay->fw = NULL;
+
+ overlay->path[0] = '\0';
+
+ return count;
+}
+
+static ssize_t cfs_overlay_item_status_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", to_cfs_overlay_item(item)->ov_id >= 0 ?
+ "applied" : "unapplied");
+}
+
+CONFIGFS_ATTR(cfs_overlay_item_, path);
+CONFIGFS_ATTR_RO(cfs_overlay_item_, status);
+
+static struct configfs_attribute *cfs_overlay_attrs[] = {
+ &cfs_overlay_item_attr_path,
+ &cfs_overlay_item_attr_status,
+ NULL,
+};
+
+ssize_t cfs_overlay_item_dtbo_read(struct config_item *item, void *buf,
+ size_t max_count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ pr_debug("%s: buf=%p max_count=%zu\n", __func__,
+ buf, max_count);
+
+ if (overlay->dtbo == NULL)
+ return 0;
+
+ /* copy if buffer provided */
+ if (buf != NULL) {
+ /* the buffer must be large enough */
+ if (overlay->dtbo_size > max_count)
+ return -ENOSPC;
+
+ memcpy(buf, overlay->dtbo, overlay->dtbo_size);
+ }
+
+ return overlay->dtbo_size;
+}
+
+ssize_t cfs_overlay_item_dtbo_write(struct config_item *item, const void *buf,
+ size_t count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+ int err;
+
+ /* if it's set do not allow changes */
+ if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+ return -EPERM;
+
+ /* copy the contents */
+ overlay->dtbo = kmemdup(buf, count, GFP_KERNEL);
+ if (overlay->dtbo == NULL)
+ return -ENOMEM;
+
+ overlay->dtbo_size = count;
+
+ err = create_overlay(overlay, overlay->dtbo);
+ if (err < 0)
+ goto out_err;
+
+ return count;
+
+out_err:
+ kfree(overlay->dtbo);
+ overlay->dtbo = NULL;
+ overlay->dtbo_size = 0;
+
+ return err;
+}
+
+CONFIGFS_BIN_ATTR(cfs_overlay_item_, dtbo, NULL, SZ_1M);
+
+static struct configfs_bin_attribute *cfs_overlay_bin_attrs[] = {
+ &cfs_overlay_item_attr_dtbo,
+ NULL,
+};
+
+static void cfs_overlay_release(struct config_item *item)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ if (overlay->ov_id >= 0)
+ of_overlay_remove(&overlay->ov_id);
+ if (overlay->fw)
+ release_firmware(overlay->fw);
+ /* kfree with NULL is safe */
+ kfree(overlay->dtbo);
+ kfree(overlay->mem);
+ kfree(overlay);
+}
+
+static struct configfs_item_operations cfs_overlay_item_ops = {
+ .release = cfs_overlay_release,
+};
+
+static struct config_item_type cfs_overlay_type = {
+ .ct_item_ops = &cfs_overlay_item_ops,
+ .ct_attrs = cfs_overlay_attrs,
+ .ct_bin_attrs = cfs_overlay_bin_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *cfs_overlay_group_make_item(
+ struct config_group *group, const char *name)
+{
+ struct cfs_overlay_item *overlay;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+ overlay->ov_id = -1;
+
+ config_item_init_type_name(&overlay->item, name, &cfs_overlay_type);
+ return &overlay->item;
+}
+
+static void cfs_overlay_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ config_item_put(&overlay->item);
+}
+
+static struct configfs_group_operations overlays_ops = {
+ .make_item = cfs_overlay_group_make_item,
+ .drop_item = cfs_overlay_group_drop_item,
+};
+
+static struct config_item_type overlays_type = {
+ .ct_group_ops = &overlays_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_group_operations of_cfs_ops = {
+ /* empty - we don't allow anything to be created */
+};
+
+static struct config_item_type of_cfs_type = {
+ .ct_group_ops = &of_cfs_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group of_cfs_overlay_group;
+
+static struct configfs_subsystem of_cfs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "device-tree",
+ .ci_type = &of_cfs_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(of_cfs_subsys.su_mutex),
+};
+
+static int __init of_cfs_init(void)
+{
+ int ret;
+
+ pr_info("%s\n", __func__);
+
+ config_group_init(&of_cfs_subsys.su_group);
+ config_group_init_type_name(&of_cfs_overlay_group, "overlays",
+ &overlays_type);
+ configfs_add_default_group(&of_cfs_overlay_group,
+ &of_cfs_subsys.su_group);
+
+ ret = configfs_register_subsystem(&of_cfs_subsys);
+ if (ret != 0) {
+ pr_err("%s: failed to register subsys\n", __func__);
+ goto out;
+ }
+ pr_info("%s: OK\n", __func__);
+out:
+ return ret;
+}
+late_initcall(of_cfs_init);
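Applying an overlay from userspace then amounts to creating a directory under the overlays group and writing a compiled overlay blob into its dtbo attribute; removing the directory releases the overlay again. A sketch, assuming configfs is mounted at the conventional /sys/kernel/config:

    /* Illustrative userspace sketch; the mount point is an assumption. */
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #define OV_DIR "/sys/kernel/config/device-tree/overlays/demo"

    int apply_overlay(const void *dtbo, size_t len)
    {
        int fd, ret = -1;

        /* mkdir ends up in cfs_overlay_group_make_item() */
        if (mkdir(OV_DIR, 0755) < 0)
            return -1;
        fd = open(OV_DIR "/dtbo", O_WRONLY);
        if (fd >= 0) {
            /* the write lands in cfs_overlay_item_dtbo_write(),
             * which applies the blob via create_overlay()
             */
            if (write(fd, dtbo, len) == (ssize_t)len)
                ret = 0;
            close(fd);
        }
        return ret;
    }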
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 49b16f76d78e..63a163dd7cee 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -927,3 +927,176 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
return 0;
}
EXPORT_SYMBOL_GPL(of_changeset_action);
+
+/* changeset helpers */
+
+/**
+ * __of_changeset_add_update_property_copy - Create/update a property copying
+ * name & value
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @value: pointer to the value data
+ * @length: length of the value in bytes
+ * @update: True on update operation
+ *
+ * Adds/updates a property to the changeset by making copies of the name & value
+ * entries. The @update parameter controls whether an add or update takes place.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+int __of_changeset_add_update_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name, const void *value,
+ int length, bool update)
+{
+ struct property *prop;
+ int ret = -ENOMEM;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ prop->name = kstrdup(name, GFP_KERNEL);
+ if (!prop->name)
+ goto out_err;
+
+ /*
+ * NOTE: There is no check for zero length value.
+ * In case of a boolean property, this will allocate a value
+ * of zero bytes. We do this to work around the use
+ * of of_get_property() calls on boolean values.
+ */
+ prop->value = kmemdup(value, length, GFP_KERNEL);
+ if (!prop->value)
+ goto out_err;
+
+ of_property_set_flag(prop, OF_DYNAMIC);
+ prop->length = length;
+
+ if (!update)
+ ret = of_changeset_add_property(ocs, np, prop);
+ else
+ ret = of_changeset_update_property(ocs, np, prop);
+
+ if (!ret)
+ return 0;
+
+out_err:
+ /* Assigning into @prop directly means nothing leaks on failure */
+ kfree(prop->value);
+ kfree(prop->name);
+ kfree(prop);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__of_changeset_add_update_property_copy);
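+
+/*
+ * Usage sketch (illustrative only): a caller building a changeset might
+ * copy a property in as follows; @np and the "status" property here are
+ * hypothetical.
+ *
+ *	struct of_changeset ocs;
+ *	int ret;
+ *
+ *	of_changeset_init(&ocs);
+ *	ret = __of_changeset_add_update_property_copy(&ocs, np, "status",
+ *						      "okay", sizeof("okay"),
+ *						      false);
+ *	if (!ret)
+ *		ret = of_changeset_apply(&ocs);
+ */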
+
+/**
+ * of_changeset_add_property_stringf - Create a new formatted string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @fmt: format of string property
+ * ... arguments of the format string
+ *
+ * Adds a string property to the changeset by making copies of the name
+ * and the formatted value.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+__printf(4, 5) int of_changeset_add_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ va_list vargs;
+ int ret;
+
+ va_start(vargs, fmt);
+ ret = __of_changeset_add_update_property_stringv(ocs, np, name, fmt,
+ vargs, false);
+ va_end(vargs);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_changeset_add_property_stringf);
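+
+/*
+ * Usage sketch (illustrative only): the trailing arguments follow printf
+ * semantics, so a numbered value can be generated in place; the "label"
+ * property and the id variable are hypothetical.
+ *
+ *	of_changeset_add_property_stringf(&ocs, np, "label", "region%d", id);
+ */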
+
+/**
+ * of_changeset_update_property_stringf - Update formatted string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @fmt: format of string property
+ * ... arguments of the format string
+ *
+ * Updates a string property in the changeset by making copies of the name
+ * and the formatted value.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+int of_changeset_update_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ va_list vargs;
+ int ret;
+
+ va_start(vargs, fmt);
+ ret = __of_changeset_add_update_property_stringv(ocs, np, name, fmt,
+ vargs, true);
+ va_end(vargs);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_changeset_update_property_stringf);
+
+/**
+ * __of_changeset_add_update_property_string_list - Create/update a string
+ * list property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @strs: pointer to the string list
+ * @count: string count
+ * @update: True on update operation
+ *
+ * Adds a string list property to the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+int __of_changeset_add_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count, bool update)
+{
+ int total = 0, i, ret;
+ char *value, *s;
+
+ for (i = 0; i < count; i++) {
+ /* check if it's NULL */
+ if (!strs[i])
+ return -EINVAL;
+ total += strlen(strs[i]) + 1;
+ }
+
+ value = kmalloc(total, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ for (i = 0, s = value; i < count; i++) {
+ /* no need to check for NULL, checked above */
+ strcpy(s, strs[i]);
+ s += strlen(strs[i]) + 1;
+ }
+
+ ret = __of_changeset_add_update_property_copy(ocs, np, name, value,
+ total, update);
+
+ kfree(value);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__of_changeset_add_update_property_string_list);
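+
+/*
+ * Usage sketch (illustrative only): the strings are packed back to back,
+ * NUL separated, exactly as a devicetree stringlist expects; the names
+ * below are hypothetical.
+ *
+ *	static const char *names[] = { "aper", "axi" };
+ *
+ *	__of_changeset_add_update_property_string_list(&ocs, np,
+ *					"clock-names", names,
+ *					ARRAY_SIZE(names), false);
+ */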
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 297bf928d652..6a170bd8e024 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,7 +52,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARC || ARM || ARM64 || X86
+ def_bool ARC || ARM || ARM64 || X86 || MICROBLAZE
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 011c57cae4b0..af567475adb5 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -109,6 +109,14 @@ config PCIE_XILINX
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
+config PCIE_XDMA_PL
+ bool "Xilinx XDMA PL PCIe host bridge support"
+ depends on ARCH_ZYNQMP || MICROBLAZE
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say 'Y' here if you want the kernel to support the
+ Xilinx XDMA PL PCIe Host Bridge driver.
+
config PCI_XGENE
bool "X-Gene PCIe controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index d56a507495c5..3803e8abc703 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_XDMA_PL) += pcie-xdma-pl.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
diff --git a/drivers/pci/controller/pcie-xdma-pl.c b/drivers/pci/controller/pcie-xdma-pl.c
new file mode 100644
index 000000000000..108dcb02337e
--- /dev/null
+++ b/drivers/pci/controller/pcie-xdma-pl.c
@@ -0,0 +1,811 @@
+/*
+ * PCIe host controller driver for Xilinx XDMA PCIe Bridge
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "../pci.h"
+#include <linux/irqchip/chained_irq.h>
+
+/* Register definitions */
+#define XILINX_PCIE_REG_VSEC 0x0000012c
+#define XILINX_PCIE_REG_BIR 0x00000130
+#define XILINX_PCIE_REG_IDR 0x00000138
+#define XILINX_PCIE_REG_IMR 0x0000013c
+#define XILINX_PCIE_REG_PSCR 0x00000144
+#define XILINX_PCIE_REG_RPSC 0x00000148
+#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_REG_RPEFR 0x00000154
+#define XILINX_PCIE_REG_RPIFR1 0x00000158
+#define XILINX_PCIE_REG_RPIFR2 0x0000015c
+#define XILINX_PCIE_REG_IDRN 0x00000160
+#define XILINX_PCIE_REG_IDRN_MASK 0x00000164
+#define XILINX_PCIE_REG_MSI_LOW 0x00000170
+#define XILINX_PCIE_REG_MSI_HI 0x00000174
+#define XILINX_PCIE_REG_MSI_LOW_MASK 0x00000178
+#define XILINX_PCIE_REG_MSI_HI_MASK 0x0000017c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_PCIE_INTR_FATAL BIT(11)
+#define XILINX_PCIE_INTR_INTX BIT(16)
+#define XILINX_PCIE_INTR_MSI BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
+#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_IDRN_MASK GENMASK(19, 16)
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
+#define XILINX_PCIE_IDRN_SHIFT 16
+#define XILINX_PCIE_VSEC_REV_MASK GENMASK(19, 16)
+#define XILINX_PCIE_VSEC_REV_SHIFT 16
+#define XILINX_PCIE_FIFO_SHIFT 5
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT 20
+#define ECAM_DEV_NUM_SHIFT 12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 64
+#define INTX_NUM 4
+
+enum msi_mode {
+ MSI_DECD_MODE = 1,
+ MSI_FIFO_MODE,
+};
+
+struct xilinx_msi {
+ struct irq_domain *msi_domain;
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* protect bitmap variable */
+ unsigned long msi_pages;
+ int irq_msi0;
+ int irq_msi1;
+};
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @leg_domain: Legacy IRQ domain pointer
+ * @resources: Bus Resources
+ * @msi: MSI information
+ * @irq_misc: Legacy and error interrupt number
+ * @msi_mode: MSI mode
+ */
+struct xilinx_pcie_port {
+ void __iomem *reg_base;
+ u32 irq;
+ u8 root_busno;
+ struct device *dev;
+ struct irq_domain *leg_domain;
+ struct list_head resources;
+ struct xilinx_msi msi;
+ int irq_misc;
+ u8 msi_mode;
+};
+
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+ return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+ XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+ if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ XILINX_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' if a valid device is present, 'false' otherwise
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct xilinx_pcie_port *port = bus->sysdata;
+
+ /* Check if link is up when trying to access downstream ports */
+ if (bus->number != port->root_busno)
+ if (!xilinx_pcie_link_is_up(port))
+ return false;
+
+ /* Only one device down on each root port */
+ if (bus->number == port->root_busno && devfn > 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * xilinx_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_pcie_port *port = bus->sysdata;
+ int relbus;
+
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+ (devfn << ECAM_DEV_NUM_SHIFT);
+
+ return port->reg_base + relbus + where;
+}
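+
+/*
+ * Example (illustrative): for bus 1, device 1, function 0 (devfn 0x08)
+ * at config offset 0x10, relbus = (1 << 20) | (0x08 << 12) = 0x108000,
+ * so the access resolves to reg_base + 0x108010.
+ */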
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+ .map_bus = xilinx_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+ struct xilinx_msi *msi = &port->msi;
+ phys_addr_t msg_addr;
+
+ msi->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ msg_addr = virt_to_phys((void *)msi->msi_pages);
+ pcie_write(port, upper_32_bits(msg_addr), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(msg_addr), XILINX_PCIE_REG_MSIBASE2);
+}
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+static void xilinx_pcie_handle_msi_irq(struct xilinx_pcie_port *port,
+ u32 status_reg)
+{
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit;
+ u32 hwirq;
+ u32 virq;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, status_reg)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, status_reg);
+ /*
+ * The HI status register reports vectors 32-63; remap
+ * into a separate hwirq variable so the loop iterator
+ * is not disturbed.
+ */
+ hwirq = bit;
+ if (status_reg == XILINX_PCIE_REG_MSI_HI)
+ hwirq += 32;
+ virq = irq_find_mapping(msi->dev_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void xilinx_pcie_msi_handler_high(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xilinx_pcie_port *port = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ xilinx_pcie_handle_msi_irq(port, XILINX_PCIE_REG_MSI_HI);
+ chained_irq_exit(chip, desc);
+}
+
+static void xilinx_pcie_msi_handler_low(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xilinx_pcie_port *port = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ xilinx_pcie_handle_msi_irq(port, XILINX_PCIE_REG_MSI_LOW);
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+ u32 val, mask, status, msi_data, bit;
+ unsigned long intr_val;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(port, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_PCIE_INTR_LINK_DOWN)
+ dev_warn(port->dev, "Link Down\n");
+
+ if (status & XILINX_PCIE_INTR_ECRC_ERR)
+ dev_warn(port->dev, "ECRC failed\n");
+
+ if (status & XILINX_PCIE_INTR_STR_ERR)
+ dev_warn(port->dev, "Streaming error\n");
+
+ if (status & XILINX_PCIE_INTR_HOT_RESET)
+ dev_info(port->dev, "Hot reset\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(port->dev, "ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+ dev_warn(port->dev, "Correctable error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_NONFATAL) {
+ dev_warn(port->dev, "Non fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_FATAL) {
+ dev_warn(port->dev, "Fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_INTX) {
+ /* Handle INTx Interrupt */
+ intr_val = pcie_read(port, XILINX_PCIE_REG_IDRN);
+ intr_val = intr_val >> XILINX_PCIE_IDRN_SHIFT;
+
+ for_each_set_bit(bit, &intr_val, INTX_NUM)
+ generic_handle_irq(irq_find_mapping(port->leg_domain,
+ bit));
+ }
+
+ if (port->msi_mode == MSI_FIFO_MODE &&
+ (status & XILINX_PCIE_INTR_MSI)) {
+ /* MSI Interrupt */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ goto error;
+ }
+
+ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+ msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ /* Handle MSI Interrupt */
+ val = irq_find_mapping(port->msi.dev_domain,
+ msi_data);
+ if (val)
+ generic_handle_irq(val);
+ }
+ }
+ }
+
+ if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(port->dev, "Slave unsupported request\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+ dev_warn(port->dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_COMPL)
+ dev_warn(port->dev, "Slave completion timeout\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ERRP)
+ dev_warn(port->dev, "Slave Error Poison\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+ dev_warn(port->dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(port->dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_PCIE_INTR_MST_DECERR)
+ dev_warn(port->dev, "Master decode error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_SLVERR)
+ dev_warn(port->dev, "Master slave error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_ERRP)
+ dev_warn(port->dev, "Master error poison\n");
+
+error:
+ /* Clear the Interrupt Decode register */
+ pcie_write(port, status, XILINX_PCIE_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip xilinx_msi_irq_chip = {
+ .name = "xilinx_pcie:msi",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info xilinx_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &xilinx_msi_irq_chip,
+};
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+ phys_addr_t msi_addr;
+
+ msi_addr = virt_to_phys((void *)msi->msi_pages);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static int xilinx_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip xilinx_irq_chip = {
+ .name = "Xilinx MSI",
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+ .irq_set_affinity = xilinx_msi_set_affinity,
+};
+
+static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xilinx_pcie_port *pcie = domain->host_data;
+ struct xilinx_msi *msi = &pcie->msi;
+ int bit, tst_bit;
+ int i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_next_zero_area(msi->bitmap, XILINX_NUM_MSI_IRQS, 0,
+ nr_irqs, 0);
+ if (bit >= XILINX_NUM_MSI_IRQS) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ /* Multi-MSI allocations must start on an nr_irqs-aligned hwirq */
+ if (nr_irqs > 1 && (bit % nr_irqs)) {
+ tst_bit = bit & ((1 << ilog2(nr_irqs)) - 1);
+ bit = bit - tst_bit + nr_irqs;
+ }
+
+ bitmap_set(msi->bitmap, bit, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+ return 0;
+}
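+
+/*
+ * Example (illustrative): a request for nr_irqs = 4 when the first free
+ * bit is 6 yields tst_bit = 6 & 3 = 2, so the allocation moves up to
+ * bit 8, keeping the multi-MSI block naturally aligned.
+ */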
+
+static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = xilinx_irq_domain_alloc,
+ .free = xilinx_irq_domain_free,
+};
+
+static int xilinx_pcie_init_msi_irq_domain(struct xilinx_pcie_port *port)
+{
+ struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+ struct xilinx_msi *msi = &port->msi;
+ int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
+
+ msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(port->dev, "failed to create dev IRQ domain\n");
+ return -ENOMEM;
+ }
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &xilinx_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(port->dev, "failed to create msi IRQ domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ mutex_init(&msi->lock);
+ msi->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!msi->bitmap) {
+ irq_domain_remove(msi->msi_domain);
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ xilinx_pcie_enable_msi(port);
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, INTX_NUM,
+ &intx_domain_ops,
+ port);
+ if (!port->leg_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return PTR_ERR(port->leg_domain);
+ }
+
+ return xilinx_pcie_init_msi_irq_domain(port);
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+ if (xilinx_pcie_link_is_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IDR);
+
+ /* Enable all interrupts */
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
+ pcie_write(port, XILINX_PCIE_IDRN_MASK, XILINX_PCIE_REG_IDRN_MASK);
+ if (port->msi_mode == MSI_DECD_MODE) {
+ pcie_write(port, XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_MSI_LOW_MASK);
+ pcie_write(port, XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_MSI_HI_MASK);
+ }
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ XILINX_PCIE_REG_RPSC_BEN,
+ XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource regs;
+ const char *type;
+ int err, mode_val, val;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ port->reg_base = devm_ioremap_resource(dev, &regs);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ val = pcie_read(port, XILINX_PCIE_REG_BIR);
+ val = (val >> XILINX_PCIE_FIFO_SHIFT) & MSI_DECD_MODE;
+ mode_val = pcie_read(port, XILINX_PCIE_REG_VSEC) &
+ XILINX_PCIE_VSEC_REV_MASK;
+ mode_val = mode_val >> XILINX_PCIE_VSEC_REV_SHIFT;
+ if (mode_val && !val) {
+ port->msi_mode = MSI_DECD_MODE;
+ dev_info(dev, "Using MSI Decode mode\n");
+ } else {
+ port->msi_mode = MSI_FIFO_MODE;
+ dev_info(dev, "Using MSI FIFO mode\n");
+ }
+
+ if (port->msi_mode == MSI_DECD_MODE) {
+ port->irq_misc = platform_get_irq_byname(pdev, "misc");
+ if (port->irq_misc <= 0) {
+ dev_err(dev, "Unable to find misc IRQ line\n");
+ return port->irq_misc;
+ }
+ err = devm_request_irq(dev, port->irq_misc,
+ xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request misc IRQ line %d\n",
+ port->irq);
+ return err;
+ }
+
+ port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (port->msi.irq_msi0 <= 0) {
+ dev_err(dev, "Unable to find msi0 IRQ line\n");
+ return port->msi.irq_msi0;
+ }
+
+ irq_set_chained_handler_and_data(port->msi.irq_msi0,
+ xilinx_pcie_msi_handler_low,
+ port);
+
+ port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (port->msi.irq_msi1 <= 0) {
+ dev_err(dev, "Unable to find msi1 IRQ line\n");
+ return port->msi.irq_msi1;
+ }
+
+ irq_set_chained_handler_and_data(port->msi.irq_msi1,
+ xilinx_pcie_msi_handler_high,
+ port);
+
+ } else if (port->msi_mode == MSI_FIFO_MODE) {
+ port->irq = irq_of_parse_and_map(node, 0);
+ if (!port->irq) {
+ dev_err(dev, "Unable to find IRQ line\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n", port->irq);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ int err;
+ resource_size_t iobase = 0;
+ LIST_HEAD(res);
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ err = xilinx_pcie_parse_dt(port);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pcie_init_port(port);
+
+ err = xilinx_pcie_init_irq_domain(port);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
+ &iobase);
+ if (err) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return err;
+ }
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto error;
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = port;
+ bridge->busnr = port->root_busno;
+ bridge->ops = &xilinx_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err)
+ goto error;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+ return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return err;
+}
+
+static const struct of_device_id xilinx_pcie_of_match[] = {
+ { .compatible = "xlnx,xdma-host-3.00", },
+ {}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+ .driver = {
+ .name = "xilinx-pcie",
+ .of_match_table = xilinx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 45c0f344ccd1..bb7d50ac4d54 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -6,6 +6,7 @@
* (C) Copyright 2014 - 2015, Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -20,6 +21,8 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "../pci.h"
@@ -37,6 +40,11 @@
#define E_ECAM_CONTROL 0x00000228
#define E_ECAM_BASE_LO 0x00000230
#define E_ECAM_BASE_HI 0x00000234
+#define E_DREG_CTRL 0x00000288
+#define E_DREG_BASE_LO 0x00000290
+
+#define DREG_DMA_EN BIT(0)
+#define DREG_DMA_BASE_LO 0xFD0F0000
/* Ingress - address translations */
#define I_MSII_CAPABILITIES 0x00000300
@@ -55,6 +63,10 @@
#define MSGF_MSI_STATUS_HI 0x00000444
#define MSGF_MSI_MASK_LO 0x00000448
#define MSGF_MSI_MASK_HI 0x0000044C
+/* Root DMA Interrupt register */
+#define MSGF_DMA_MASK 0x00000464
+
+#define MSGF_INTR_EN BIT(0)
/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD BIT(1)
@@ -169,6 +181,7 @@ struct nwl_pcie {
u8 root_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -491,7 +504,7 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
- domain->host_data, handle_simple_irq,
+ domain->host_data, handle_simple_irq,
NULL, NULL);
}
mutex_unlock(&msi->lock);
@@ -499,7 +512,7 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+ unsigned int nr_irqs)
{
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -753,7 +766,6 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
-
/* Disable all legacy interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
@@ -761,6 +773,12 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
+ /* Enabling DREG translations */
+ nwl_bridge_writel(pcie, DREG_DMA_EN, E_DREG_CTRL);
+ nwl_bridge_writel(pcie, DREG_DMA_BASE_LO, E_DREG_BASE_LO);
+ /* Enabling Root DMA interrupts */
+ nwl_bridge_writel(pcie, MSGF_INTR_EN, MSGF_DMA_MASK);
+
/* Enable all legacy interrupts */
nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
@@ -813,6 +831,31 @@ static const struct of_device_id nwl_pcie_of_match[] = {
{}
};
+static int nwl_pcie_reset_ep_device(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int gpio;
+ int err;
+
+ gpio = of_get_named_gpio(node, "reset-gpio", 0);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(&pdev->dev, "failed to parse reset gpio\n");
+ return gpio;
+ }
+
+ err = devm_gpio_request_one(&pdev->dev, gpio, GPIOF_OUT_INIT_HIGH,
+ "pcie reset gpio");
+ if (err)
+ return err;
+
+ udelay(2);
+ gpio_set_value(gpio, 0);
+ udelay(10);
+ gpio_set_value(gpio, 1);
+
+ return err;
+}
+
static int nwl_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -824,6 +867,14 @@ static int nwl_pcie_probe(struct platform_device *pdev)
resource_size_t iobase = 0;
LIST_HEAD(res);
+ err = nwl_pcie_reset_ep_device(pdev);
+ if (err) {
+ dev_err(dev, "fail to reset pcie device\n");
+ return err;
+ }
+ /* wait for ep device reset finished */
+ mdelay(100);
+
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
return -ENODEV;
@@ -839,6 +890,11 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+ clk_prepare_enable(pcie->clk);
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0263db2ac874..692fa987dba3 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -67,6 +67,15 @@ source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+
+config PHY_XILINX_ZYNQMP
+ tristate "Xilinx ZynqMP PHY driver"
+ depends on ARCH_ZYNQMP
+ select GENERIC_PHY
+ help
+ Enable this to support ZynqMP High Speed Gigabit Transceiver
+ that is part of ZynqMP SoC.
+
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 0d9fddc498a6..8866f9ae8a45 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -28,3 +28,4 @@ obj-y += broadcom/ \
socionext/ \
st/ \
ti/
+obj-$(CONFIG_PHY_XILINX_ZYNQMP) += phy-zynqmp.o
diff --git a/drivers/phy/phy-zynqmp.c b/drivers/phy/phy-zynqmp.c
new file mode 100644
index 000000000000..6bd746ac84b8
--- /dev/null
+++ b/drivers/phy/phy-zynqmp.c
@@ -0,0 +1,1591 @@
+/*
+ * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
+ *
+ * Copyright (C) 2015 - 2016 Xilinx Inc.
+ *
+ * Author: Subbaraya Sundeep <sbhatta@xilinx.com>
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver is currently tested with USB and SATA.
+ * The other controllers (PCIe, DisplayPort and SGMII) should also
+ * work, but support for them is experimental as of now.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <dt-bindings/phy/phy.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/reset.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#define MAX_LANES 4
+
+#define RST_TIMEOUT 1000
+
+#define ICM_CFG0 0x10010
+#define ICM_CFG1 0x10014
+#define ICM_CFG0_L0_MASK 0x07
+#define ICM_CFG0_L1_MASK 0x70
+#define ICM_CFG1_L2_MASK 0x07
+#define ICM_CFG1_L3_MASK 0x70
+
+#define TM_CMN_RST 0x10018
+#define TM_CMN_RST_MASK 0x3
+#define TM_CMN_RST_EN 0x1
+#define TM_CMN_RST_SET 0x2
+
+#define ICM_PROTOCOL_PD 0x0
+#define ICM_PROTOCOL_PCIE 0x1
+#define ICM_PROTOCOL_SATA 0x2
+#define ICM_PROTOCOL_USB 0x3
+#define ICM_PROTOCOL_DP 0x4
+#define ICM_PROTOCOL_SGMII 0x5
+
+#define PLL_REF_SEL0 0x10000
+#define PLL_REF_OFFSET 0x4
+#define PLL_FREQ_MASK 0x1F
+
+#define L0_L0_REF_CLK_SEL 0x2860
+
+#define L0_PLL_STATUS_READ_1 0x23E4
+#define PLL_STATUS_READ_OFFSET 0x4000
+#define PLL_STATUS_LOCKED 0x10
+
+#define L0_PLL_SS_STEP_SIZE_0_LSB 0x2370
+#define L0_PLL_SS_STEP_SIZE_1 0x2374
+#define L0_PLL_SS_STEP_SIZE_2 0x2378
+#define L0_PLL_SS_STEP_SIZE_3_MSB 0x237C
+#define STEP_SIZE_OFFSET 0x4000
+#define STEP_SIZE_0_MASK 0xFF
+#define STEP_SIZE_1_MASK 0xFF
+#define STEP_SIZE_2_MASK 0xFF
+#define STEP_SIZE_3_MASK 0x3
+#define FORCE_STEP_SIZE 0x10
+#define FORCE_STEPS 0x20
+
+#define L0_PLL_SS_STEPS_0_LSB 0x2368
+#define L0_PLL_SS_STEPS_1_MSB 0x236C
+#define STEPS_OFFSET 0x4000
+#define STEPS_0_MASK 0xFF
+#define STEPS_1_MASK 0x07
+
+#define BGCAL_REF_SEL 0x10028
+#define BGCAL_REF_VALUE 0x0C
+
+#define L3_TM_CALIB_DIG19 0xEC4C
+#define L3_TM_CALIB_DIG19_NSW 0x07
+
+#define TM_OVERRIDE_NSW_CODE 0x20
+
+#define L3_CALIB_DONE_STATUS 0xEF14
+#define CALIB_DONE 0x02
+
+#define L0_TXPMA_ST_3 0x0B0C
+#define DN_CALIB_CODE 0x3F
+#define DN_CALIB_SHIFT 3
+
+#define L3_TM_CALIB_DIG18 0xEC48
+#define L3_TM_CALIB_DIG18_NSW 0xE0
+#define NSW_SHIFT 5
+#define NSW_PIPE_SHIFT 4
+
+#define L0_TM_PLL_DIG_37 0x2094
+#define TM_PLL_DIG_37_OFFSET 0x4000
+#define TM_COARSE_CODE_LIMIT 0x10
+
+#define L0_TM_DIG_6 0x106C
+#define TM_DIG_6_OFFSET 0x4000
+#define TM_DISABLE_DESCRAMBLE_DECODER 0x0F
+
+#define L0_TX_DIG_61 0x00F4
+#define TX_DIG_61_OFFSET 0x4000
+#define TM_DISABLE_SCRAMBLE_ENCODER 0x0F
+
+#define L0_TX_ANA_TM_18 0x0048
+#define TX_ANA_TM_18_OFFSET 0x4000
+
+#define L0_TX_ANA_TM_118 0x01D8
+#define TX_ANA_TM_118_OFFSET 0x4000
+#define L0_TX_ANA_TM_118_FORCE_17_0 BIT(0)
+
+#define L0_TXPMD_TM_45 0x0CB4
+#define TXPMD_TM_45_OFFSET 0x4000
+#define L0_TXPMD_TM_45_OVER_DP_MAIN BIT(0)
+#define L0_TXPMD_TM_45_ENABLE_DP_MAIN BIT(1)
+#define L0_TXPMD_TM_45_OVER_DP_POST1 BIT(2)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST1 BIT(3)
+#define L0_TXPMD_TM_45_OVER_DP_POST2 BIT(4)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST2 BIT(5)
+
+#define L0_TXPMD_TM_48 0x0CC0
+#define TXPMD_TM_48_OFFSET 0x4000
+
+#define TX_PROT_BUS_WIDTH 0x10040
+#define RX_PROT_BUS_WIDTH 0x10044
+
+#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_10 0x0
+#define PROT_BUS_WIDTH_20 0x1
+#define PROT_BUS_WIDTH_40 0x2
+
+#define LANE_CLK_SHARE_MASK 0x8F
+
+#define SATA_CONTROL_OFFSET 0x0100
+
+#define CONTROLLERS_PER_LANE 5
+
+#define PIPE_CLK_OFFSET 0x7c
+#define PIPE_CLK_ON 1
+#define PIPE_CLK_OFF 0
+#define PIPE_POWER_OFFSET 0x80
+#define PIPE_POWER_ON 1
+#define PIPE_POWER_OFF 0
+
+#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
+#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
+#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
+#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
+#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
+#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
+#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
+#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
+#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
+#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
+#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
+#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
+#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
+#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
+
+/*
+ * This table holds the valid combinations of controllers and
+ * lanes (interconnect matrix).
+ */
+static unsigned int icm_matrix[][CONTROLLERS_PER_LANE] = {
+ { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
+ { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
+ { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
+ { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
+};
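+
+/*
+ * Reading the matrix (illustrative): row n lists the controllers that
+ * lane n may carry, e.g. row 2 allows lane 2 to serve PCIe lane 2,
+ * SATA lane 0, USB0, DP lane 1 or SGMII2; other pairings are invalid.
+ */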
+
+/* Allowed PLL reference clock frequencies */
+enum pll_frequencies {
+ REF_19_2M = 0,
+ REF_20M,
+ REF_24M,
+ REF_26M,
+ REF_27M,
+ REF_38_4M,
+ REF_40M,
+ REF_52M,
+ REF_100M,
+ REF_108M,
+ REF_125M,
+ REF_135M,
+ REF_150M,
+};
+
+/**
+ * struct xpsgtr_phy - representation of a lane
+ * @phy: pointer to the kernel PHY device
+ * @type: controller which uses this lane
+ * @lane: lane number
+ * @protocol: protocol in which the lane operates
+ * @ref_clk: enum of allowed ref clock rates for this lane PLL
+ * @pll_lock: PLL status
+ * @skip_phy_init: skip phy_init() if true
+ * @data: pointer to hold private data
+ * @refclk_rate: PLL reference clock frequency
+ * @share_laneclk: lane number of the clock to be shared
+ */
+struct xpsgtr_phy {
+ struct phy *phy;
+ u8 type;
+ u8 lane;
+ u8 protocol;
+ enum pll_frequencies ref_clk;
+ bool pll_lock;
+ bool skip_phy_init;
+ void *data;
+ u32 refclk_rate;
+ u32 share_laneclk;
+};
+
+/**
+ * struct xpsgtr_ssc - structure to hold SSC settings for a lane
+ * @refclk_rate: PLL reference clock frequency
+ * @pll_ref_clk: value to be written to register for corresponding ref clk rate
+ * @steps: number of steps of SSC (Spread Spectrum Clock)
+ * @step_size: step size of each step
+ */
+struct xpsgtr_ssc {
+ u32 refclk_rate;
+ u8 pll_ref_clk;
+ u32 steps;
+ u32 step_size;
+};
+
+/* lookup table to hold all settings needed for a ref clock frequency */
+static struct xpsgtr_ssc ssc_lookup[] = {
+ {19200000, 0x05, 608, 264020},
+ {20000000, 0x06, 634, 243454},
+ {24000000, 0x07, 760, 168973},
+ {26000000, 0x08, 824, 143860},
+ {27000000, 0x09, 856, 86551},
+ {38400000, 0x0A, 1218, 65896},
+ {40000000, 0x0B, 634, 243454},
+ {52000000, 0x0C, 824, 143860},
+ {100000000, 0x0D, 1058, 87533},
+ {108000000, 0x0E, 856, 86551},
+ {125000000, 0x0F, 992, 119497},
+ {135000000, 0x10, 1070, 55393},
+ {150000000, 0x11, 792, 187091}
+};
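+
+/*
+ * Example (illustrative): a 27 MHz lane reference clock matches the
+ * fifth entry above, i.e. a PLL reference field value of 0x09 with 856
+ * SSC steps of step size 86551.
+ */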
+
+/**
+ * struct xpsgtr_dev - representation of a ZynqMP GT device
+ * @dev: pointer to device
+ * @serdes: serdes base address
+ * @siou: siou base address
+ * @gtr_mutex: mutex for locking
+ * @phys: pointer to all the lanes
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
+ * @sata_rst: a reset control for SATA
+ * @dp_rst: a reset control for DP
+ * @usb0_crst: a reset control for usb0 core
+ * @usb1_crst: a reset control for usb1 core
+ * @usb0_hibrst: a reset control for usb0 hibernation module
+ * @usb1_hibrst: a reset control for usb1 hibernation module
+ * @usb0_apbrst: a reset control for usb0 apb bus
+ * @usb1_apbrst: a reset control for usb1 apb bus
+ * @gem0_rst: a reset control for gem0
+ * @gem1_rst: a reset control for gem1
+ * @gem2_rst: a reset control for gem2
+ * @gem3_rst: a reset control for gem3
+ */
+struct xpsgtr_dev {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *siou;
+ struct mutex gtr_mutex;
+ struct xpsgtr_phy **phys;
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
+ struct reset_control *sata_rst;
+ struct reset_control *dp_rst;
+ struct reset_control *usb0_crst;
+ struct reset_control *usb1_crst;
+ struct reset_control *usb0_hibrst;
+ struct reset_control *usb1_hibrst;
+ struct reset_control *usb0_apbrst;
+ struct reset_control *usb1_apbrst;
+ struct reset_control *gem0_rst;
+ struct reset_control *gem1_rst;
+ struct reset_control *gem2_rst;
+ struct reset_control *gem3_rst;
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+int xpsgtr_override_deemph(struct phy *phy, u8 plvl, u8 vlvl)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ static u8 pe[4][4] = { { 0x2, 0x2, 0x2, 0x2 },
+ { 0x1, 0x1, 0x1, 0xff },
+ { 0x0, 0x0, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ writel(pe[plvl][vlvl],
+ gtr_dev->serdes + gtr_phy->lane * TX_ANA_TM_18_OFFSET +
+ L0_TX_ANA_TM_18);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_override_deemph);
+
+int xpsgtr_margining_factor(struct phy *phy, u8 plvl, u8 vlvl)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ static u8 vs[4][4] = { { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ writel(vs[plvl][vlvl],
+ gtr_dev->serdes + gtr_phy->lane * TXPMD_TM_48_OFFSET +
+ L0_TXPMD_TM_48);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_margining_factor);
+
+/**
+ * xpsgtr_configure_pll - configures SSC settings for a lane
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 reg;
+ u32 offset;
+ u32 steps;
+ u32 size;
+ u8 pll_ref_clk;
+
+ steps = ssc_lookup[gtr_phy->ref_clk].steps;
+ size = ssc_lookup[gtr_phy->ref_clk].step_size;
+ pll_ref_clk = ssc_lookup[gtr_phy->ref_clk].pll_ref_clk;
+
+ offset = gtr_phy->lane * PLL_REF_OFFSET + PLL_REF_SEL0;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~PLL_FREQ_MASK) | pll_ref_clk;
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* Enable lane clock sharing, if required */
+ if (gtr_phy->share_laneclk != gtr_phy->lane) {
+ /* Lane Ref Clock Selection Register for this lane */
+ offset = gtr_phy->lane * PLL_REF_OFFSET + L0_L0_REF_CLK_SEL;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~LANE_CLK_SHARE_MASK) |
+ (1 << gtr_phy->share_laneclk);
+ writel(reg, gtr_dev->serdes + offset);
+ }
+
+ /* SSC step size [7:0] */
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_0_LSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_0_MASK) |
+ (size & STEP_SIZE_0_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [15:8] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_1;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_1_MASK) |
+ (size & STEP_SIZE_1_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [23:16] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_2;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_2_MASK) |
+ (size & STEP_SIZE_2_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC steps [7:0] */
+ offset = gtr_phy->lane * STEPS_OFFSET + L0_PLL_SS_STEPS_0_LSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEPS_0_MASK) |
+ (steps & STEPS_0_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC steps [10:8] */
+ steps = steps >> 8;
+ offset = gtr_phy->lane * STEPS_OFFSET + L0_PLL_SS_STEPS_1_MSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEPS_1_MASK) |
+ (steps & STEPS_1_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [25:24] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_3_MSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_3_MASK) |
+ (size & STEP_SIZE_3_MASK);
+ reg |= FORCE_STEP_SIZE | FORCE_STEPS;
+ writel(reg, gtr_dev->serdes + offset);
+}
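+
+/*
+ * Worked example (illustrative): with a 19.2 MHz reference, the step
+ * size 264020 (0x40754) is spread over the four step-size registers
+ * above as 0x54, 0x07, 0x04 and 0x0, and the MSB write also sets
+ * FORCE_STEP_SIZE | FORCE_STEPS.
+ */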
+
+/**
+ * xpsgtr_lane_setprotocol - sets required protocol in ICM registers
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_lane_setprotocol(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 reg;
+ u8 protocol = gtr_phy->protocol;
+
+ switch (gtr_phy->lane) {
+ case 0:
+ reg = readl(gtr_dev->serdes + ICM_CFG0);
+ reg = (reg & ~ICM_CFG0_L0_MASK) | protocol;
+ writel(reg, gtr_dev->serdes + ICM_CFG0);
+ break;
+ case 1:
+ reg = readl(gtr_dev->serdes + ICM_CFG0);
+ reg = (reg & ~ICM_CFG0_L1_MASK) | (protocol << 4);
+ writel(reg, gtr_dev->serdes + ICM_CFG0);
+ break;
+ case 2:
+ reg = readl(gtr_dev->serdes + ICM_CFG1);
+ reg = (reg & ~ICM_CFG0_L0_MASK) | protocol;
+ writel(reg, gtr_dev->serdes + ICM_CFG1);
+ break;
+ case 3:
+ reg = readl(gtr_dev->serdes + ICM_CFG1);
+ reg = (reg & ~ICM_CFG0_L1_MASK) | (protocol << 4);
+ writel(reg, gtr_dev->serdes + ICM_CFG1);
+ break;
+ default:
+ /* We already checked 0 <= lane <= 3 */
+ break;
+ }
+}
+
+/**
+ * xpsgtr_get_ssc - gets the required ssc settings based on clk rate
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_get_ssc(struct xpsgtr_phy *gtr_phy)
+{
+ u32 i;
+
+ /*
+ * Assign the required spread spectrum (SSC) settings
+ * from the lane reference clock rate
+ */
+ for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
+ if (gtr_phy->refclk_rate == ssc_lookup[i].refclk_rate) {
+ gtr_phy->ref_clk = i;
+ return 0;
+ }
+ }
+
+ /* Did not get valid SSC settings */
+ return -EINVAL;
+}
+
+/**
+ * xpsgtr_configure_lane - configures SSC settings for a lane
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_configure_lane(struct xpsgtr_phy *gtr_phy)
+{
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ case XPSGTR_TYPE_USB1:
+ gtr_phy->protocol = ICM_PROTOCOL_USB;
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ gtr_phy->protocol = ICM_PROTOCOL_SATA;
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ gtr_phy->protocol = ICM_PROTOCOL_DP;
+ break;
+ case XPSGTR_TYPE_PCIE_0:
+ case XPSGTR_TYPE_PCIE_1:
+ case XPSGTR_TYPE_PCIE_2:
+ case XPSGTR_TYPE_PCIE_3:
+ gtr_phy->protocol = ICM_PROTOCOL_PCIE;
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ case XPSGTR_TYPE_SGMII1:
+ case XPSGTR_TYPE_SGMII2:
+ case XPSGTR_TYPE_SGMII3:
+ gtr_phy->protocol = ICM_PROTOCOL_SGMII;
+ break;
+ default:
+ gtr_phy->protocol = ICM_PROTOCOL_PD;
+ break;
+ }
+
+ /* Get SSC settings for the reference clock rate */
+ if (xpsgtr_get_ssc(gtr_phy) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xpsgtr_config_usbpipe - configures the PIPE3 signals for USB
+ * @gtr_phy: pointer to gtr phy device
+ */
+static void xpsgtr_config_usbpipe(struct xpsgtr_phy *gtr_phy)
+{
+ struct phy *phy = gtr_phy->phy;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ void __iomem *regs = dev_get_platdata(&phy->dev);
+
+ if (regs) {
+ /* Set PIPE power present signal */
+ writel(PIPE_POWER_ON, regs + PIPE_POWER_OFFSET);
+ /* Clear PIPE CLK signal */
+ writel(PIPE_CLK_OFF, regs + PIPE_CLK_OFFSET);
+ } else {
+ dev_info(gtr_dev->dev,
+ "%s: No valid Platform_data found\n", __func__);
+ }
+}
+
+/**
+ * xpsgtr_reset_assert - asserts reset using reset framework
+ * @rstc: pointer to reset_control
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_reset_assert(struct reset_control *rstc)
+{
+ unsigned long loop_time = msecs_to_jiffies(RST_TIMEOUT);
+ unsigned long timeout;
+
+ reset_control_assert(rstc);
+
+ /* wait until reset is asserted or timeout */
+ timeout = jiffies + loop_time;
+
+ while (!time_after_eq(jiffies, timeout)) {
+ if (reset_control_status(rstc) > 0)
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xpsgtr_reset_release - de-asserts reset using reset framework
+ * @rstc: pointer to reset_control
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_reset_release(struct reset_control *rstc)
+{
+ unsigned long loop_time = msecs_to_jiffies(RST_TIMEOUT);
+ unsigned long timeout;
+
+ reset_control_deassert(rstc);
+
+ /* wait until reset is de-asserted or timeout */
+ timeout = jiffies + loop_time;
+ while (!time_after_eq(jiffies, timeout)) {
+ if (!reset_control_status(rstc))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xpsgtr_controller_reset - puts controller in reset
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_controller_reset(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ /* Do not let a failed assert be masked by a later one */
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_crst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_hibrst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_apbrst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_crst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_hibrst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_apbrst);
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ ret = xpsgtr_reset_assert(gtr_dev->sata_rst);
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ ret = xpsgtr_reset_assert(gtr_dev->dp_rst);
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ ret = xpsgtr_reset_assert(gtr_dev->gem0_rst);
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ ret = xpsgtr_reset_assert(gtr_dev->gem1_rst);
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ ret = xpsgtr_reset_assert(gtr_dev->gem2_rst);
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ ret = xpsgtr_reset_assert(gtr_dev->gem3_rst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_controller_release_reset - releases controller from reset
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_controller_release_reset(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_release(gtr_dev->usb0_apbrst);
+ if (ret)
+ break;
+
+ /* Config PIPE3 signals after releasing APB reset */
+ xpsgtr_config_usbpipe(gtr_phy);
+
+ ret = xpsgtr_reset_release(gtr_dev->usb0_crst);
+ if (!ret)
+ ret = xpsgtr_reset_release(gtr_dev->usb0_hibrst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_release(gtr_dev->usb1_apbrst);
+ if (ret)
+ break;
+
+ /* Config PIPE3 signals after releasing APB reset */
+ xpsgtr_config_usbpipe(gtr_phy);
+
+ ret = xpsgtr_reset_release(gtr_dev->usb1_crst);
+ if (!ret)
+ ret = xpsgtr_reset_release(gtr_dev->usb1_hibrst);
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ ret = xpsgtr_reset_release(gtr_dev->sata_rst);
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ ret = xpsgtr_reset_release(gtr_dev->dp_rst);
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ ret = xpsgtr_reset_release(gtr_dev->gem0_rst);
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ ret = xpsgtr_reset_release(gtr_dev->gem1_rst);
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ ret = xpsgtr_reset_release(gtr_dev->gem2_rst);
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ ret = xpsgtr_reset_release(gtr_dev->gem3_rst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_usb_crst_assert - assert USB core reset
+ * @phy: pointer to phy
+ *
+ * Return: 0 on success or error on failure
+ */
+int xpsgtr_usb_crst_assert(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_crst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_crst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xpsgtr_usb_crst_assert);
+
+/**
+ * xpsgtr_usb_crst_release - release USB core reset
+ * @phy: pointer to phy
+ *
+ * Return: 0 on success or error on failure
+ */
+int xpsgtr_usb_crst_release(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_release(gtr_dev->usb0_crst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_release(gtr_dev->usb1_crst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xpsgtr_usb_crst_release);
+
+int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 offset, reg;
+ u32 timeout = 1000;
+ int ret = 0;
+
+ /* Check pll is locked */
+ offset = gtr_phy->lane * PLL_STATUS_READ_OFFSET + L0_PLL_STATUS_READ_1;
+ dev_dbg(gtr_dev->dev, "Waiting for PLL lock...\n");
+
+ do {
+ reg = readl(gtr_dev->serdes + offset);
+ if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "PLL lock time out\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ } while (1);
+
+ if (ret == 0)
+ gtr_phy->pll_lock = true;
+
+ dev_info(gtr_dev->dev, "Lane:%d type:%d protocol:%d pll_locked:%s\n",
+ gtr_phy->lane, gtr_phy->type, gtr_phy->protocol,
+ gtr_phy->pll_lock ? "yes" : "no");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_wait_pll_lock);
+
+/**
+ * xpsgtr_set_txwidth - This function sets the tx bus width of the lane
+ * @gtr_phy: pointer to lane
+ * @width: tx bus width size
+ */
+static void xpsgtr_set_txwidth(struct xpsgtr_phy *gtr_phy, u32 width)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ writel(gtr_phy->lane * PROT_BUS_WIDTH_SHIFT >> width,
+ gtr_dev->serdes + TX_PROT_BUS_WIDTH);
+}
+
+/**
+ * xpsgtr_set_rxwidth - This function sets the rx bus width of the lane
+ * @gtr_phy: pointer to lane
+ * @width: rx bus width size
+ */
+static void xpsgtr_set_rxwidth(struct xpsgtr_phy *gtr_phy, u32 width)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ writel(gtr_phy->lane * PROT_BUS_WIDTH_SHIFT >> width,
+ gtr_dev->serdes + RX_PROT_BUS_WIDTH);
+}
+
+/**
+ * xpsgtr_bypass_scramenc - This bypasses scrambler and 8b/10b encoder feature
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_bypass_scramenc(struct xpsgtr_phy *gtr_phy)
+{
+ u32 offset;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ offset = gtr_phy->lane * TX_DIG_61_OFFSET + L0_TX_DIG_61;
+ writel(TM_DISABLE_SCRAMBLE_ENCODER, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_bypass_descramdec - bypasses descrambler and 8b/10b encoder feature
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_bypass_descramdec(struct xpsgtr_phy *gtr_phy)
+{
+ u32 offset;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Descrambler and 8b/10b decoder */
+ offset = gtr_phy->lane * TM_DIG_6_OFFSET + L0_TM_DIG_6;
+ writel(TM_DISABLE_DESCRAMBLE_DECODER, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_misc_sgmii - miscellaneous settings for SGMII
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_misc_sgmii(struct xpsgtr_phy *gtr_phy)
+{
+ /* Set SGMII protocol tx bus width 10 bits */
+ xpsgtr_set_txwidth(gtr_phy, PROT_BUS_WIDTH_10);
+
+ /* Set SGMII protocol rx bus width 10 bits */
+ xpsgtr_set_rxwidth(gtr_phy, PROT_BUS_WIDTH_10);
+
+ /* bypass Descrambler and 8b/10b decoder */
+ xpsgtr_bypass_descramdec(gtr_phy);
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ xpsgtr_bypass_scramenc(gtr_phy);
+}
+
+/**
+ * xpsgtr_misc_sata - miscellaneous settings for SATA
+ * @gtr_phy: pointer to lane
+ */
+static void xpsgtr_misc_sata(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Descrambler and 8b/10b decoder */
+ xpsgtr_bypass_descramdec(gtr_phy);
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ xpsgtr_bypass_scramenc(gtr_phy);
+
+ writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
+}
+
+/**
+ * xpsgtr_ulpi_reset - performs the ULPI reset sequence
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success, -EINVAL for a non-existent USB type, or an error
+ * from the communication with firmware
+ */
+static int xpsgtr_ulpi_reset(struct xpsgtr_phy *gtr_phy)
+{
+ u32 node_id;
+ int ret = 0;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ node_id = NODE_USB_0;
+ break;
+ case XPSGTR_TYPE_USB1:
+ node_id = NODE_USB_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->ioctl(node_id, IOCTL_ULPI_RESET, 0, 0, NULL);
+ if (ret < 0)
+ dev_err(gtr_dev->dev, "failed to perform ULPI reset\n");
+
+ return ret;
+}
+
+/**
+ * xpsgtr_set_sgmii_pcs - sets SGMII mode for the GEM
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success, -EINVAL for a non-existent SGMII type, or an error
+ * from the communication with firmware
+ */
+static int xpsgtr_set_sgmii_pcs(struct xpsgtr_phy *gtr_phy)
+{
+ u32 node_id;
+ int ret = 0;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ /* Set the PCS signal detect to 1 */
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_SGMII0:
+ node_id = NODE_ETH_0;
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ node_id = NODE_ETH_1;
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ node_id = NODE_ETH_2;
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ node_id = NODE_ETH_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SGMII_MODE,
+ PM_SGMII_ENABLE, 0, NULL);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "failed to set GEM to SGMII mode\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_phyinit_required - check if phy_init for the lane can be skipped
+ * @gtr_phy: pointer to the phy lane
+ *
+ * Return: true if phy_init() can be skipped, false otherwise
+ */
+static bool xpsgtr_phyinit_required(struct xpsgtr_phy *gtr_phy)
+{
+ /*
+ * USB may save a snapshot of its state for hibernation; running
+ * phy_init() resets the USB controller and would destroy that
+ * snapshot. So skip phy_init() for USB unless skip_phy_init is
+ * false, which is the case when the FPD was shut down during
+ * suspend or when the GT lane assignment has changed.
+ */
+ return gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init;
+}
+
+/**
+ * xpsgtr_phy_init - initializes a lane
+ * @phy: pointer to kernel PHY device
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_phy_init(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret = 0;
+ u32 offset;
+ u32 reg;
+ u32 nsw;
+ u32 timeout = 500;
+
+ mutex_lock(&gtr_dev->gtr_mutex);
+
+ /* Check if phy_init() is required */
+ if (xpsgtr_phyinit_required(gtr_phy))
+ goto out;
+
+ /* Put controller in reset */
+ ret = xpsgtr_controller_reset(gtr_phy);
+ if (ret != 0) {
+ dev_err(gtr_dev->dev, "Failed to assert reset\n");
+ goto out;
+ }
+
+ /*
+ * There is a functional issue in the GT. The TX termination resistance
+ * can be out of spec due to a bug in the calibration logic. The
+ * workaround below fixes it and is required for XCZU9EG silicon.
+ */
+ if (gtr_dev->tx_term_fix) {
+ /* Enable Test Mode control for CMN Reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ /* Set Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_EN;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ writel(0x00, gtr_dev->serdes + L3_TM_CALIB_DIG18);
+ writel(TM_OVERRIDE_NSW_CODE, gtr_dev->serdes +
+ L3_TM_CALIB_DIG19);
+
+ /*
+ * As part of the workaround sequence for the PMOS calibration
+ * fix, configure any lane's ICM_CFG to a valid protocol. This
+ * deasserts the CMN_Resetn signal.
+ */
+ xpsgtr_lane_setprotocol(gtr_phy);
+
+ /* Clear Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ dev_dbg(gtr_dev->dev, "calibrating...\n");
+
+ do {
+ reg = readl(gtr_dev->serdes + L3_CALIB_DONE_STATUS);
+ if ((reg & CALIB_DONE) == CALIB_DONE)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "calibration time out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ udelay(1);
+ } while (1);
+
+ dev_dbg(gtr_dev->dev, "calibration done\n");
+
+ /* Reading NMOS Register Code */
+ nsw = readl(gtr_dev->serdes + L0_TXPMA_ST_3);
+
+ /* Set Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_EN;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ nsw = nsw & DN_CALIB_CODE;
+
+ /* Writing NMOS register values back [5:3] */
+ reg = nsw >> DN_CALIB_SHIFT;
+ writel(reg, gtr_dev->serdes + L3_TM_CALIB_DIG19);
+
+ /* Writing NMOS register value [2:0] */
+ reg = ((nsw & 0x7) << NSW_SHIFT) | (1 << NSW_PIPE_SHIFT);
+ writel(reg, gtr_dev->serdes + L3_TM_CALIB_DIG18);
+
+ /* Clear Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ gtr_dev->tx_term_fix = false;
+ }
+
+ /* Enable coarse code saturation limiting logic */
+ offset = gtr_phy->lane * TM_PLL_DIG_37_OFFSET + L0_TM_PLL_DIG_37;
+ writel(TM_COARSE_CODE_LIMIT, gtr_dev->serdes + offset);
+
+ xpsgtr_configure_pll(gtr_phy);
+ xpsgtr_lane_setprotocol(gtr_phy);
+
+ if (gtr_phy->protocol == ICM_PROTOCOL_SATA)
+ xpsgtr_misc_sata(gtr_phy);
+
+ if (gtr_phy->protocol == ICM_PROTOCOL_SGMII)
+ xpsgtr_misc_sgmii(gtr_phy);
+
+ /* Bring controller out of reset */
+ ret = xpsgtr_controller_release_reset(gtr_phy);
+ if (ret != 0) {
+ dev_err(gtr_dev->dev, "Failed to release reset\n");
+ goto out;
+ }
+
+ /*
+ * Wait till the PLL is locked, for all protocols except DP. For
+ * DP, the PLL locking function is called from the DP driver.
+ */
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP) {
+ ret = xpsgtr_wait_pll_lock(phy);
+ if (ret != 0)
+ goto out;
+ } else {
+ offset = gtr_phy->lane * TXPMD_TM_45_OFFSET + L0_TXPMD_TM_45;
+ reg = L0_TXPMD_TM_45_OVER_DP_MAIN |
+ L0_TXPMD_TM_45_ENABLE_DP_MAIN |
+ L0_TXPMD_TM_45_OVER_DP_POST1 |
+ L0_TXPMD_TM_45_OVER_DP_POST2 |
+ L0_TXPMD_TM_45_ENABLE_DP_POST2;
+ writel(reg, gtr_dev->serdes + offset);
+ offset = gtr_phy->lane * TX_ANA_TM_118_OFFSET +
+ L0_TX_ANA_TM_118;
+ writel(L0_TX_ANA_TM_118_FORCE_17_0,
+ gtr_dev->serdes + offset);
+ }
+
+ /* Do ULPI reset for usb */
+ if (gtr_phy->protocol == ICM_PROTOCOL_USB)
+ ret = xpsgtr_ulpi_reset(gtr_phy);
+
+ /* Select SGMII mode for GEM and set the PCS signal detect */
+ if (gtr_phy->protocol == ICM_PROTOCOL_SGMII)
+ ret = xpsgtr_set_sgmii_pcs(gtr_phy);
+out:
+ mutex_unlock(&gtr_dev->gtr_mutex);
+ return ret;
+}
+
+/**
+ * xpsgtr_set_lanetype - derives lane type from dts arguments
+ * @gtr_phy: pointer to lane
+ * @controller: type of controller
+ * @instance_num: instance number of the controller, for controllers with multiple instances
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_set_lanetype(struct xpsgtr_phy *gtr_phy, u8 controller,
+ u8 instance_num)
+{
+ switch (controller) {
+ case PHY_TYPE_SATA:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_SATA_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_SATA_1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_USB3:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_USB0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_USB1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_DP:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_DP_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_DP_1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_PCIE:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_1;
+ else if (instance_num == 2)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_2;
+ else if (instance_num == 3)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_3;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_SGMII:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_SGMII0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_SGMII1;
+ else if (instance_num == 2)
+ gtr_phy->type = XPSGTR_TYPE_SGMII2;
+ else if (instance_num == 3)
+ gtr_phy->type = XPSGTR_TYPE_SGMII3;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xpsgtr_xlate - returns the PHY instance requested by a controller
+ * @dev: pointer to device
+ * @args: arguments from dts
+ *
+ * Return: pointer to kernel PHY device or error on failure
+ */
+static struct phy *xpsgtr_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ struct xpsgtr_phy *gtr_phy = NULL;
+ struct device_node *phynode = args->np;
+ int index;
+ int i;
+ u8 controller;
+ u8 instance_num;
+
+ if (args->args_count != 4) {
+ dev_err(dev, "Invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!of_device_is_available(phynode)) {
+ dev_warn(dev, "requested PHY is disabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+ for (index = 0; index < of_get_child_count(dev->of_node); index++) {
+ if (phynode == gtr_dev->phys[index]->phy->dev.of_node) {
+ gtr_phy = gtr_dev->phys[index];
+ break;
+ }
+ }
+ if (!gtr_phy) {
+ dev_err(dev, "failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* get type of controller from phys */
+ controller = args->args[0];
+
+ /* get controller instance number */
+ instance_num = args->args[1];
+
+ /* Check if lane sharing is required */
+ gtr_phy->share_laneclk = args->args[2];
+
+ /* get the required clk rate for controller from phys */
+ gtr_phy->refclk_rate = args->args[3];
+
+ /* derive lane type */
+ if (xpsgtr_set_lanetype(gtr_phy, controller, instance_num) < 0) {
+ dev_err(gtr_dev->dev, "Invalid lane type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* configures SSC settings for a lane */
+ if (xpsgtr_configure_lane(gtr_phy) < 0) {
+ dev_err(gtr_dev->dev, "Invalid clock rate: %d\n",
+ gtr_phy->refclk_rate);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Check that the Interconnect Matrix is obeyed, i.e. the given lane
+ * type is allowed to operate on this lane.
+ */
+ for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
+ if (icm_matrix[index][i] == gtr_phy->type)
+ return gtr_phy->phy;
+ }
+
+ /* Should not reach here */
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * xpsgtr_phy_exit - clears previous initialized variables
+ * @phy: pointer to kernel PHY device
+ *
+ * Return: 0 on success
+ */
+static int xpsgtr_phy_exit(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ /* As we are exiting, clear skip_phy_init flag */
+ gtr_phy->skip_phy_init = false;
+
+ return 0;
+}
+
+static struct phy_ops xpsgtr_phyops = {
+ .init = xpsgtr_phy_init,
+ .exit = xpsgtr_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * xpsgtr_get_resets - Gets reset signals based on reset-names property
+ * @gtr_dev: pointer to structure which stores reset information
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int xpsgtr_get_resets(struct xpsgtr_dev *gtr_dev)
+{
+ char *name;
+ struct reset_control *rst_temp;
+
+ gtr_dev->sata_rst = devm_reset_control_get(gtr_dev->dev, "sata_rst");
+ if (IS_ERR(gtr_dev->sata_rst)) {
+ name = "sata_rst";
+ rst_temp = gtr_dev->sata_rst;
+ goto error;
+ }
+
+ gtr_dev->dp_rst = devm_reset_control_get(gtr_dev->dev, "dp_rst");
+ if (IS_ERR(gtr_dev->dp_rst)) {
+ name = "dp_rst";
+ rst_temp = gtr_dev->dp_rst;
+ goto error;
+ }
+
+ gtr_dev->usb0_crst = devm_reset_control_get(gtr_dev->dev, "usb0_crst");
+ if (IS_ERR(gtr_dev->usb0_crst)) {
+ name = "usb0_crst";
+ rst_temp = gtr_dev->usb0_crst;
+ goto error;
+ }
+
+ gtr_dev->usb1_crst = devm_reset_control_get(gtr_dev->dev, "usb1_crst");
+ if (IS_ERR(gtr_dev->usb1_crst)) {
+ name = "usb1_crst";
+ rst_temp = gtr_dev->usb1_crst;
+ goto error;
+ }
+
+ gtr_dev->usb0_hibrst = devm_reset_control_get(gtr_dev->dev,
+ "usb0_hibrst");
+ if (IS_ERR(gtr_dev->usb0_hibrst)) {
+ name = "usb0_hibrst";
+ rst_temp = gtr_dev->usb0_hibrst;
+ goto error;
+ }
+
+ gtr_dev->usb1_hibrst = devm_reset_control_get(gtr_dev->dev,
+ "usb1_hibrst");
+ if (IS_ERR(gtr_dev->usb1_hibrst)) {
+ name = "usb1_hibrst";
+ rst_temp = gtr_dev->usb1_hibrst;
+ goto error;
+ }
+
+ gtr_dev->usb0_apbrst = devm_reset_control_get(gtr_dev->dev,
+ "usb0_apbrst");
+ if (IS_ERR(gtr_dev->usb0_apbrst)) {
+ name = "usb0_apbrst";
+ rst_temp = gtr_dev->usb0_apbrst;
+ goto error;
+ }
+
+ gtr_dev->usb1_apbrst = devm_reset_control_get(gtr_dev->dev,
+ "usb1_apbrst");
+ if (IS_ERR(gtr_dev->usb1_apbrst)) {
+ name = "usb1_apbrst";
+ rst_temp = gtr_dev->usb1_apbrst;
+ goto error;
+ }
+
+ gtr_dev->gem0_rst = devm_reset_control_get(gtr_dev->dev, "gem0_rst");
+ if (IS_ERR(gtr_dev->gem0_rst)) {
+ name = "gem0_rst";
+ rst_temp = gtr_dev->gem0_rst;
+ goto error;
+ }
+
+ gtr_dev->gem1_rst = devm_reset_control_get(gtr_dev->dev, "gem1_rst");
+ if (IS_ERR(gtr_dev->gem1_rst)) {
+ name = "gem1_rst";
+ rst_temp = gtr_dev->gem1_rst;
+ goto error;
+ }
+
+ gtr_dev->gem2_rst = devm_reset_control_get(gtr_dev->dev, "gem2_rst");
+ if (IS_ERR(gtr_dev->gem2_rst)) {
+ name = "gem2_rst";
+ rst_temp = gtr_dev->gem2_rst;
+ goto error;
+ }
+
+ gtr_dev->gem3_rst = devm_reset_control_get(gtr_dev->dev, "gem3_rst");
+ if (IS_ERR(gtr_dev->gem3_rst)) {
+ name = "gem3_rst";
+ rst_temp = gtr_dev->gem3_rst;
+ goto error;
+ }
+
+ return 0;
+error:
+ dev_err(gtr_dev->dev, "failed to get %s reset signal\n", name);
+ return PTR_ERR(rst_temp);
+}
+
+/**
+ * xpsgtr_probe - The device probe function for driver initialization.
+ * @pdev: pointer to the platform device structure.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xpsgtr_probe(struct platform_device *pdev)
+{
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct xpsgtr_dev *gtr_dev;
+ struct phy_provider *provider;
+ struct phy *phy;
+ struct resource *res;
+ char *soc_rev;
+ int lanecount, port = 0, index = 0;
+ int err;
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
+ dev_warn(&pdev->dev, "This binding is deprecated, please use new compatible binding\n");
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+ if (!gtr_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "serdes");
+ gtr_dev->serdes = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gtr_dev->serdes))
+ return PTR_ERR(gtr_dev->serdes);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "siou");
+ gtr_dev->siou = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gtr_dev->siou))
+ return PTR_ERR(gtr_dev->siou);
+
+ lanecount = of_get_child_count(np);
+ if (lanecount > MAX_LANES || lanecount == 0)
+ return -EINVAL;
+
+ gtr_dev->phys = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev->phys) * lanecount,
+ GFP_KERNEL);
+ if (!gtr_dev->phys)
+ return -ENOMEM;
+
+ gtr_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gtr_dev);
+ mutex_init(&gtr_dev->gtr_mutex);
+
+ /* Deferred probe is also handled if nvmem is not ready */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+ if (IS_ERR(soc_rev))
+ return PTR_ERR(soc_rev);
+
+ if (*soc_rev == ZYNQMP_SILICON_V1)
+ gtr_dev->tx_term_fix = true;
+
+ kfree(soc_rev);
+
+ err = xpsgtr_get_resets(gtr_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct xpsgtr_phy *gtr_phy;
+
+ gtr_phy = devm_kzalloc(&pdev->dev, sizeof(*gtr_phy),
+ GFP_KERNEL);
+ if (!gtr_phy)
+ return -ENOMEM;
+
+ /* Assign lane number to gtr_phy instance */
+ gtr_phy->lane = index;
+
+ /* Disable lane sharing as default */
+ gtr_phy->share_laneclk = -1;
+
+ gtr_dev->phys[port] = gtr_phy;
+ phy = devm_phy_create(&pdev->dev, child, &xpsgtr_phyops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ return PTR_ERR(phy);
+ }
+ gtr_dev->phys[port]->phy = phy;
+ phy_set_drvdata(phy, gtr_dev->phys[port]);
+ gtr_phy->data = gtr_dev;
+ port++;
+ index++;
+ }
+ provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(&pdev->dev, "registering provider failed\n");
+ return PTR_ERR(provider);
+ }
+ return 0;
+}
+
+static int xpsgtr_suspend(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ /* Save the ICM_CFG registers */
+ gtr_dev->saved_icm_cfg0 = readl(gtr_dev->serdes + ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = readl(gtr_dev->serdes + ICM_CFG1);
+
+ return 0;
+}
+
+static int xpsgtr_resume(struct device *dev)
+{
+ unsigned int icm_cfg0, icm_cfg1, index;
+ bool skip_phy_init;
+ struct xpsgtr_phy *gtr_phy;
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ icm_cfg0 = readl(gtr_dev->serdes + ICM_CFG0);
+ icm_cfg1 = readl(gtr_dev->serdes + ICM_CFG1);
+
+ /* Just return if no gt lanes got configured before suspend */
+ if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
+ return 0;
+
+ /* Check if the ICM configurations changed after suspend */
+ if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
+ icm_cfg1 == gtr_dev->saved_icm_cfg1)
+ skip_phy_init = true;
+ else
+ skip_phy_init = false;
+
+ /* Update skip_phy_init for all gtr_phy instances */
+ for (index = 0; index < of_get_child_count(dev->of_node); index++) {
+ gtr_phy = gtr_dev->phys[index];
+ gtr_phy->skip_phy_init = skip_phy_init;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops xpsgtr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id xpsgtr_of_match[] = {
+ { .compatible = "xlnx,zynqmp-psgtr", },
+ { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+
+static struct platform_driver xpsgtr_driver = {
+ .probe = xpsgtr_probe,
+ .driver = {
+ .name = "xilinx-psgtr",
+ .of_match_table = xpsgtr_of_match,
+ .pm = &xpsgtr_pm_ops,
+ },
+};
+
+module_platform_driver(xpsgtr_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");
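For orientation, a consumer drives one of these lanes through the generic PHY API: devm_phy_get() resolves the lane, phy_init() ends up in xpsgtr_phy_init() above, and a USB glue driver can bracket controller reprogramming with the exported core-reset helpers. A hedged sketch; the "usb3-phy" con_id and the surrounding probe code are hypothetical:

    struct phy *usb_phy;
    int ret;

    usb_phy = devm_phy_get(dev, "usb3-phy");    /* hypothetical con_id */
    if (IS_ERR(usb_phy))
            return PTR_ERR(usb_phy);

    ret = phy_init(usb_phy);                    /* runs xpsgtr_phy_init() */
    if (ret)
            return ret;

    xpsgtr_usb_crst_assert(usb_phy);
    /* ... reprogram the USB controller ... */
    xpsgtr_usb_crst_release(usb_phy);
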
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b372419d61f2..5db95e3c96bb 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -372,6 +372,14 @@ config PINCTRL_RK805
help
This selects the pinctrl driver for RK805.
+config PINCTRL_ZYNQMP
+ bool "Pinctrl driver for Xilinx ZynqMP"
+ depends on ARCH_ZYNQMP
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Xilinx ZynqMP.
+
config PINCTRL_OCELOT
bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index ac537fdbc998..a779d0465dad 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
+obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-y += actions/
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
new file mode 100644
index 000000000000..9b3b4dc88a1f
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ZynqMP pin controller
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ * Chirag Parekh <chirag.parekh@xilinx.com>
+ */
+
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include "core.h"
+#include "pinctrl-utils.h"
+
+#define ZYNQMP_PIN_PREFIX "MIO"
+#define PINCTRL_GET_FUNC_NAME_RESP_LEN 16
+#define MAX_FUNC_NAME_LEN 16
+#define MAX_GROUP_PIN 50
+#define END_OF_FUNCTIONS "END_OF_FUNCTIONS"
+#define NUM_GROUPS_PER_RESP 6
+
+#define PINCTRL_GET_FUNC_GROUPS_RESP_LEN 12
+#define PINCTRL_GET_PIN_GROUPS_RESP_LEN 12
+#define NA_GROUP -1
+#define RESERVED_GROUP -2
+
+/**
+ * struct zynqmp_pmux_function - a pinmux function
+ * @name: Name of the pinmux function
+ * @groups: List of pingroups for this function
+ * @ngroups: Number of entries in @groups
+ *
+ * This structure holds information about pin control function
+ * and function group names supporting that function.
+ */
+struct zynqmp_pmux_function {
+ char name[MAX_FUNC_NAME_LEN];
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct zynqmp_pinctrl - driver data
+ * @pctrl: Pinctrl device
+ * @groups: Pingroups
+ * @ngroups: Number of @groups
+ * @funcs: Pinmux functions
+ * @nfuncs: Number of @funcs
+ *
+ * This struct is stored as driver data and used to retrieve
+ * information regarding pin control functions, groups and
+ * group pins.
+ */
+struct zynqmp_pinctrl {
+ struct pinctrl_dev *pctrl;
+ const struct zynqmp_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct zynqmp_pmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+/**
+ * struct zynqmp_pctrl_group - Pin control group info
+ * @name: Group name
+ * @pins: Group pin numbers
+ * @npins: Number of pins in group
+ */
+struct zynqmp_pctrl_group {
+ const char *name;
+ unsigned int pins[MAX_GROUP_PIN];
+ unsigned int npins;
+};
+
+/**
+ * enum zynqmp_pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard,
+ * the argument to this parameter (in a
+ * custom format) tells the driver which
+ * alternative IO standard to use
+ * @PIN_CONFIG_SCHMITTCMOS: this parameter (in a custom format)
+ * selects schmitt or cmos input for MIO pins
+ */
+enum zynqmp_pin_config_param {
+ PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
+ PIN_CONFIG_SCHMITTCMOS,
+};
+
+static const struct pinconf_generic_params zynqmp_dt_params[] = {
+ {"io-standard", PIN_CONFIG_IOSTANDARD, IO_STANDARD_LVCMOS18},
+ {"schmitt-cmos", PIN_CONFIG_SCHMITTCMOS, PIN_INPUT_TYPE_SCHMITT},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct
+pin_config_item zynqmp_conf_items[ARRAY_SIZE(zynqmp_dt_params)] = {
+ PCONFDUMP(PIN_CONFIG_IOSTANDARD, "IO-standard", NULL, true),
+ PCONFDUMP(PIN_CONFIG_SCHMITTCMOS, "schmitt-cmos", NULL, true),
+};
+#endif
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+static const struct pinctrl_pin_desc zynqmp_pins;
+static struct pinctrl_desc zynqmp_desc;
+
+/**
+ * zynqmp_pctrl_get_groups_count() - get group count
+ * @pctldev: Pincontrol device pointer.
+ *
+ * Get the total group count.
+ *
+ * Return: group count.
+ */
+static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+/**
+ * zynqmp_pctrl_get_group_name() - get group name
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ *
+ * Get group's name.
+ *
+ * Return: group name.
+ */
+static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+/**
+ * zynqmp_pctrl_get_group_pins() - get group pins
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @pins: Pin numbers.
+ * @npins: Number of pins in group.
+ *
+ * Get group's pin count and pin numbers.
+ *
+ * Return: 0 always.
+ */
+static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *npins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops zynqmp_pctrl_ops = {
+ .get_groups_count = zynqmp_pctrl_get_groups_count,
+ .get_group_name = zynqmp_pctrl_get_group_name,
+ .get_group_pins = zynqmp_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+/**
+ * zynqmp_pinmux_request_pin() - Request a pin for muxing
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ *
+ * Request a pin from firmware for muxing.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_request_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ if (!eemi_ops->pinctrl_request)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->pinctrl_request(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "request failed for pin %u\n", pin);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pmux_get_functions_count() - get number of functions
+ * @pctldev: Pincontrol device pointer.
+ *
+ * Get total function count.
+ *
+ * Return: function count.
+ */
+static int zynqmp_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+/**
+ * zynqmp_pmux_get_function_name() - get function name
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID.
+ *
+ * Get function's name.
+ *
+ * Return: function name.
+ */
+static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+/**
+ * zynqmp_pmux_get_function_groups() - Get groups for the function
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID
+ * @groups: Group names.
+ * @num_groups: Number of function groups.
+ *
+ * Get function's group count and group names.
+ *
+ * Return: 0 always.
+ */
+static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_set_mux() - Set requested function for the group
+ * @pctldev: Pincontrol device pointer.
+ * @function: Function ID.
+ * @group: Group ID.
+ *
+ * Loop through all pins of the group and call the firmware API
+ * to set the requested function for each pin in the group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[group];
+ int ret, i;
+
+ if (!eemi_ops->pinctrl_set_function)
+ return -ENOTSUPP;
+
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+
+ ret = eemi_ops->pinctrl_set_function(pin, function);
+ if (ret) {
+ dev_err(pctldev->dev, "set mux failed for pin %u\n",
+ pin);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_release_pin() - Release a pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ *
+ * Release a pin from firmware.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_release_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ if (!eemi_ops->pinctrl_release)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->pinctrl_release(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "free pin failed for pin %u\n",
+ pin);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops zynqmp_pinmux_ops = {
+ .request = zynqmp_pinmux_request_pin,
+ .get_functions_count = zynqmp_pmux_get_functions_count,
+ .get_function_name = zynqmp_pmux_get_function_name,
+ .get_function_groups = zynqmp_pmux_get_function_groups,
+ .set_mux = zynqmp_pinmux_set_mux,
+ .free = zynqmp_pinmux_release_pin,
+};
+
+/**
+ * zynqmp_pinconf_cfg_get() - get config value for the pin
+ * @pctldev: Pin control device pointer.
+ * @pin: Pin number.
+ * @config: Value of config param.
+ *
+ * Get value of the requested configuration parameter for the
+ * given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ int ret;
+ unsigned int arg = 0, param = pinconf_to_config_param(*config);
+
+ if (!eemi_ops->pinctrl_get_config)
+ return -ENOTSUPP;
+
+ if (pin >= zynqmp_desc.npins)
+ return -ENOTSUPP;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ &arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_UP)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ &arg);
+ if (arg != PM_PINCTRL_BIAS_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ &arg);
+ if (arg != PM_PINCTRL_BIAS_DISABLE)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ &arg);
+ break;
+ case PIN_CONFIG_SCHMITTCMOS:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ &arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ &arg);
+ switch (arg) {
+ case PM_PINCTRL_DRIVE_STRENGTH_2MA:
+ arg = DRIVE_STRENGTH_2MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_4MA:
+ arg = DRIVE_STRENGTH_4MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_8MA:
+ arg = DRIVE_STRENGTH_8MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_12MA:
+ arg = DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_cfg_set() - Set requested config for the pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Loop through all configurations and call the firmware API
+ * to set the requested configurations for the pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+
+ if (!eemi_ops->pinctrl_set_config)
+ return -ENOTSUPP;
+
+ if (pin >= zynqmp_desc.npins)
+ return -ENOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
+ unsigned int value;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_BIAS_PULL_UP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_BIAS_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_BIAS_DISABLE);
+ break;
+ case PIN_CONFIG_SCHMITTCMOS:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ switch (arg) {
+ case DRIVE_STRENGTH_2MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_2MA;
+ break;
+ case DRIVE_STRENGTH_4MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_4MA;
+ break;
+ case DRIVE_STRENGTH_8MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_8MA;
+ break;
+ case DRIVE_STRENGTH_12MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ value);
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ &value);
+
+ if (arg != value)
+ dev_warn(pctldev->dev,
+ "Invalid IO Standard requested for pin %d\n",
+ pin);
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_LOW_POWER_MODE:
+ /*
+ * These cases are mentioned in DTS but the configurable
+ * registers are unknown. Accept them silently for now to
+ * avoid boot-time warnings.
+ */
+ ret = 0;
+ break;
+ default:
+ dev_warn(pctldev->dev,
+ "unsupported configuration parameter '%u'\n",
+ param);
+ ret = -ENOTSUPP;
+ break;
+ }
+ if (ret)
+ dev_warn(pctldev->dev,
+ "%s failed: pin %u param %u value %u\n",
+ __func__, pin, param, arg);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_group_set() - Set requested config for the group
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @configs: Configuration to set.
+ * @num_configs: Number of configurations.
+ *
+ * Call function to set configs for each pin in group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[selector];
+
+ for (i = 0; i < pgrp->npins; i++) {
+ ret = zynqmp_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops zynqmp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zynqmp_pinconf_cfg_get,
+ .pin_config_set = zynqmp_pinconf_cfg_set,
+ .pin_config_group_set = zynqmp_pinconf_group_set,
+};
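Both conf ops exchange configurations as packed unsigned longs; the generic pinconf helpers do the packing and unpacking. A small illustration with arbitrarily chosen values:

    unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH,
                                                 DRIVE_STRENGTH_4MA);

    /* zynqmp_pinconf_cfg_set() recovers the two halves like this: */
    unsigned int param = pinconf_to_config_param(cfg);
    unsigned int arg = pinconf_to_config_argument(cfg);
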
+
+static struct pinctrl_desc zynqmp_desc = {
+ .name = "zynqmp_pinctrl",
+ .owner = THIS_MODULE,
+ .pctlops = &zynqmp_pctrl_ops,
+ .pmxops = &zynqmp_pinmux_ops,
+ .confops = &zynqmp_pinconf_ops,
+};
+
+/**
+ * zynqmp_pinctrl_get_function_groups() - get groups for the function
+ * @fid: Function ID.
+ * @index: Group index.
+ * @groups: Groups data.
+ *
+ * Call firmware API to get groups for the given function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_function_groups(u32 fid, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+ qdata.arg2 = index;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &ret_payload[1], PINCTRL_GET_FUNC_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_func_num_groups() - get number of groups in function
+ * @fid: Function ID.
+ * @ngroups: Number of groups in function.
+ *
+ * Call firmware API to get the number of groups in the function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_func_num_groups(u32 fid, unsigned int *ngroups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ *ngroups = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_func_groups() - prepare function and groups data
+ * @dev: Device pointer.
+ * @fid: Function ID.
+ * @func: Function data.
+ * @groups: Groups data.
+ *
+ * Query firmware for the group IDs of each function. Group names are
+ * derived from the group's index within the function: the first group
+ * of the "eth0" function is named "eth0_0_grp", the second "eth0_1_grp",
+ * and so on.
+ *
+ * The name of each group returned by firmware is then stored at its
+ * group ID. For example, if the first group ID of "eth0" is x,
+ * groups[x] is named "eth0_0_grp".
+ *
+ * Once this is done for every function, each function knows its group
+ * names and every group has a name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
+ struct zynqmp_pmux_function *func,
+ struct zynqmp_pctrl_group *groups)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ const char **fgroups;
+ int ret = 0, index, i;
+
+ fgroups = devm_kzalloc(dev, sizeof(*fgroups) * func->ngroups,
+ GFP_KERNEL);
+ if (!fgroups)
+ return -ENOMEM;
+
+ for (index = 0; index < func->ngroups; index += NUM_GROUPS_PER_RESP) {
+ ret = zynqmp_pinctrl_get_function_groups(fid, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == (u16)NA_GROUP)
+ goto done;
+ if (resp[i] == (u16)RESERVED_GROUP)
+ continue;
+ fgroups[index + i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ groups[resp[i]].name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ }
+ }
+done:
+ func->groups = fgroups;
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_function_name() - get function name
+ * @fid: Function ID.
+ * @name: Function name
+ *
+ * Call firmware API to get name of given function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_function_name(u32 fid, char *name)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_NAME;
+ qdata.arg1 = fid;
+
+ eemi_ops->query_data(qdata, ret_payload);
+ memcpy(name, ret_payload, PINCTRL_GET_FUNC_NAME_RESP_LEN);
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinctrl_get_num_functions() - get number of supported functions
+ * @nfuncs: Number of functions.
+ *
+ * Call firmware API to get number of functions supported by system/board.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_num_functions(unsigned int *nfuncs)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTIONS;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ *nfuncs = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_pin_groups() - get groups for the pin
+ * @pin: Pin number.
+ * @index: Group index.
+ * @groups: Groups data.
+ *
+ * Call firmware API to get groups for the given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_pin_groups(u32 pin, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_PIN_GROUPS;
+ qdata.arg1 = pin;
+ qdata.arg2 = index;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ memcpy(groups, &ret_payload[1], PINCTRL_GET_PIN_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_group_add_pin() - add pin to given group
+ * @group: Group data.
+ * @pin: Pin number.
+ *
+ * Add the pin number at the end of the group's pin array and
+ * increment the group's pin count.
+ */
+static void zynqmp_pinctrl_group_add_pin(struct zynqmp_pctrl_group *group,
+ unsigned int pin)
+{
+ group->pins[group->npins++] = pin;
+}
+
+/**
+ * zynqmp_pinctrl_create_pin_groups() - assign pins to respective groups
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @pin: Pin number.
+ *
+ * Query firmware for the groups available for the given pin.
+ * Based on the firmware response (group IDs for the pin), add the
+ * pin number to each of those groups' pin arrays.
+ *
+ * Once all pins have been queried, each group has its pin count
+ * and pin numbers.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_create_pin_groups(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int pin)
+{
+ int ret, i, index = 0;
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+
+ do {
+ ret = zynqmp_pinctrl_get_pin_groups(pin, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == (u16)NA_GROUP)
+ goto done;
+ if (resp[i] == (u16)RESERVED_GROUP)
+ continue;
+ zynqmp_pinctrl_group_add_pin(&groups[resp[i]], pin);
+ }
+ index += NUM_GROUPS_PER_RESP;
+ } while (1);
+
+done:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_group_pins() - prepare each group's pin data
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @ngroups: Number of groups.
+ *
+ * Prepare the pin numbers and pin count for each group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_group_pins(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int ngroups)
+{
+ unsigned int pin;
+ int ret = 0;
+
+ for (pin = 0; pin < zynqmp_desc.npins; pin++) {
+ ret = zynqmp_pinctrl_create_pin_groups(dev, groups, pin);
+ if (ret)
+ goto done;
+ }
+done:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_function_info() - prepare function info
+ * @dev: Device pointer.
+ * @pctrl: Pin control driver data.
+ *
+ * Query firmware for functions, groups and pin information and
+ * prepare pin control driver data.
+ *
+ * Query number of functions and number of function groups (number
+ * of groups in given function) to allocate required memory buffers
+ * for functions and groups. Once buffers are allocated to store
+ * functions and groups data, query and store required information
+ * (number of groups and group names for each function, number of
+ * pins and pin numbers for each group).
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
+ struct zynqmp_pinctrl *pctrl)
+{
+ struct zynqmp_pmux_function *funcs;
+ struct zynqmp_pctrl_group *groups;
+ int ret, i;
+
+ ret = zynqmp_pinctrl_get_num_functions(&pctrl->nfuncs);
+ if (ret)
+ return ret;
+
+ funcs = devm_kzalloc(dev, sizeof(*funcs) * pctrl->nfuncs, GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ zynqmp_pinctrl_get_function_name(i, funcs[i].name);
+
+ ret = zynqmp_pinctrl_get_func_num_groups(i, &funcs[i].ngroups);
+ if (ret)
+ goto err;
+ pctrl->ngroups += funcs[i].ngroups;
+ }
+
+ groups = devm_kzalloc(dev, sizeof(*groups) * pctrl->ngroups,
+ GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ ret = zynqmp_pinctrl_prepare_func_groups(dev, i, &funcs[i],
+ groups);
+ if (ret)
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
+ if (ret)
+ goto err;
+
+ pctrl->funcs = funcs;
+ pctrl->groups = groups;
+
+err:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_num_pins() - get number of pins in system
+ * @npins: Number of pins in system/board.
+ *
+ * Call firmware API to get number of pins.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_num_pins(unsigned int *npins)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_PINS;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ *npins = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_pin_desc() - prepare pin description info
+ * @dev: Device pointer.
+ * @zynqmp_pins: Pin information.
+ * @npins: Number of pins.
+ *
+ * Query the number of pins from firmware and prepare the pin
+ * descriptions, each containing a pin number and a pin name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
+ const struct pinctrl_pin_desc **zynqmp_pins,
+ unsigned int *npins)
+{
+ struct pinctrl_pin_desc *pins, *pin;
+ int ret;
+ int i;
+
+ ret = zynqmp_pinctrl_get_num_pins(npins);
+ if (ret)
+ return ret;
+
+ pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < *npins; i++) {
+ pin = &pins[i];
+ pin->number = i;
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ }
+
+ *zynqmp_pins = pins;
+
+ return 0;
+}
+
+static int zynqmp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl;
+ int ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,pinctrl-zynqmp")) {
+ dev_err(&pdev->dev, "ERROR: This binding is deprecated, please use new compatible binding\n");
+ return -ENOENT;
+ }
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ if (!eemi_ops->query_data) {
+ dev_err(&pdev->dev, "%s: Firmware interface not available\n",
+ __func__);
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev,
+ &zynqmp_desc.pins,
+ &zynqmp_desc.npins);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() pin desc prepare fail with %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_function_info(&pdev->dev, pctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() function info prepare fail with %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ pctrl->pctrl = pinctrl_register(&zynqmp_desc, &pdev->dev, pctrl);
+ if (IS_ERR(pctrl->pctrl)) {
+ ret = PTR_ERR(pctrl->pctrl);
+ goto err;
+ }
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_info(&pdev->dev, "zynqmp pinctrl initialized\n");
+err:
+ return ret;
+}
+
+static int zynqmp_pinctrl_remove(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_pinctrl_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pinctrl" },
+ { .compatible = "xlnx,pinctrl-zynqmp" },
+ { }
+};
+
+static struct platform_driver zynqmp_pinctrl_driver = {
+ .driver = {
+ .name = "zynqmp-pinctrl",
+ .of_match_table = zynqmp_pinctrl_of_match,
+ },
+ .probe = zynqmp_pinctrl_probe,
+ .remove = zynqmp_pinctrl_remove,
+};
+
+static int __init zynqmp_pinctrl_init(void)
+{
+ return platform_driver_register(&zynqmp_pinctrl_driver);
+}
+arch_initcall(zynqmp_pinctrl_init);
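To see where these ops are exercised: a client's pinctrl states are normally applied by the driver core at probe time, but a driver can also select them explicitly through the generic consumer API. A hedged sketch, with nothing in it specific to this file beyond where the calls land:

    struct pinctrl *p;
    struct pinctrl_state *s;

    p = devm_pinctrl_get(&pdev->dev);
    if (IS_ERR(p))
            return PTR_ERR(p);

    s = pinctrl_lookup_state(p, "default");     /* conventional state name */
    if (IS_ERR(s))
            return PTR_ERR(s);

    /* Walks the mapped groups and lands in zynqmp_pinmux_set_mux() and
     * zynqmp_pinconf_group_set() via the firmware-backed ops above.
     */
    return pinctrl_select_state(p, s);
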
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 164e82b2b53c..2255469a4b44 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -196,6 +196,26 @@ config ST_REMOTEPROC
processor framework.
This can be either built-in or a loadable module.
+config ZYNQ_REMOTEPROC
+ tristate "Support ZYNQ remoteproc"
+ depends on ARCH_ZYNQ && SMP && !DEBUG_SG
+ select RPMSG_VIRTIO
+ select HOTPLUG_CPU
+ select SRAM
+ help
+ Say y here to support Xilinx ZynQ remote processors (the second
+ ARM CORTEX-A9 cpu) via the remote processor framework.
+
+config ZYNQMP_R5_REMOTEPROC
+ tristate "ZynqMP_r5 remoteproc support"
+ depends on ARM64 && PM && ARCH_ZYNQMP
+ select RPMSG_VIRTIO
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
+ help
+ Say y here to support ZynqMP R5 remote processors via the remote
+ processor framework.
+
config ST_SLIM_REMOTEPROC
tristate
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index ce5d061e92be..e5232929c20e 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -25,4 +25,6 @@ obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
qcom_wcnss_pil-y += qcom_wcnss_iris.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
+obj-$(CONFIG_ZYNQ_REMOTEPROC) += zynq_remoteproc.o
+obj-$(CONFIG_ZYNQMP_R5_REMOTEPROC) += zynqmp_r5_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index a87750903785..bfcccd5e32b5 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -108,4 +108,27 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
+static inline
+bool rproc_allow_sysfs_kick(struct rproc *rproc)
+{
+ return rproc->sysfs_kick;
+}
+
+static inline
+bool rproc_peek_remote_kick(struct rproc *rproc, char *buf, size_t *len)
+{
+ if (rproc->ops->peek_remote_kick)
+ return rproc->ops->peek_remote_kick(rproc, buf, len);
+ else
+ return false;
+}
+
+static inline
+void rproc_ack_remote_kick(struct rproc *rproc)
+{
+ if (rproc->ops->ack_remote_kick)
+ rproc->ops->ack_remote_kick(rproc);
+}
+
+int rproc_create_kick_sysfs(struct rproc *rproc);
#endif /* REMOTEPROC_INTERNAL_H */
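These helpers dispatch to two optional callbacks that this series adds to struct rproc_ops, gated by the new sysfs_kick flag on struct rproc. A minimal sketch of a platform driver opting in; the my_* bodies are hypothetical placeholders for mailbox-backed logic:

    static bool my_peek_remote_kick(struct rproc *rproc, char *buf, size_t *len)
    {
            /* Hypothetical: a real driver would check its mailbox here and,
             * when buf is non-NULL, copy the pending payload and set *len.
             */
            return false;
    }

    static void my_ack_remote_kick(struct rproc *rproc)
    {
            /* Hypothetical: clear the mailbox/IPI status here. */
    }

    static struct rproc_ops my_rproc_ops = {
            .peek_remote_kick = my_peek_remote_kick,
            .ack_remote_kick = my_ack_remote_kick,
    };

    /* In probe, once the rproc device is registered: */
    rproc->sysfs_kick = true;
    ret = rproc_create_kick_sysfs(rproc);
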
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index fa4131930106..3c5ba3df4003 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -113,6 +113,111 @@ static ssize_t state_store(struct device *dev,
}
static DEVICE_ATTR_RW(state);
+/**
+ * kick_store() - Kick remote from sysfs.
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ * @count: size of the contents in buf
+ *
+ * It will just raise a signal, no content is expected for now.
+ *
+ * Return: the input count on success, or -EINVAL if the rproc
+ * has no kick handler.
+ */
+static ssize_t kick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ int id = 0;
+ size_t cpy_len;
+
+ (void)attr;
+ cpy_len = count <= sizeof(id) ? count : sizeof(id);
+ memcpy((char *)(&id), buf, cpy_len);
+
+ if (rproc->ops->kick)
+ rproc->ops->kick(rproc, id);
+ else
+ count = -EINVAL;
+ return count;
+}
+static DEVICE_ATTR_WO(kick);
+
+/**
+ * remote_kick_show() - Check if remote has kicked
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ *
+ * It will check if the remote has kicked.
+ *
+ * Return: 2 if it allows kick from sysfs, and the value in the sysfs buffer
+ * shows if the remote has kicked. '0' - not kicked, '1' - kicked.
+ */
+static ssize_t remote_kick_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ buf[0] = '0';
+ buf[1] = '\n';
+ if (rproc_peek_remote_kick(rproc, NULL, NULL))
+ buf[0] += 1;
+ return 2;
+}
+
+/**
+ * remote_kick_store() - Ack the kick from remote
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ * @count: size of the contents in buf
+ *
+ * It acks the remote; no response content is expected.
+ *
+ * Return: the input count if it allows kick from sysfs,
+ * as it is always expected to succeed.
+ */
+static ssize_t remote_kick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ rproc_ack_remote_kick(rproc);
+ return count;
+}
+static DEVICE_ATTR_RW(remote_kick);
+
+/**
+ * remote_pending_message_show() - Show pending message sent from remote
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ *
+ * It shows the pending message sent from the remote.
+ *
+ * Return: length of pending remote message.
+ */
+static ssize_t remote_pending_message_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+ size_t len;
+
+ if (rproc_peek_remote_kick(rproc, buf, &len)) {
+ buf[len] = '\0';
+ return len;
+ } else {
+ return -EAGAIN;
+ }
+}
+static DEVICE_ATTR_RO(remote_pending_message);
+
static struct attribute *rproc_attrs[] = {
&dev_attr_firmware.attr,
&dev_attr_state.attr,
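From userspace, the new attributes can be driven as below. A sketch only: the remoteproc index is board-specific, and kick_store() memcpy()s the raw bytes of the write into an int, so the id goes across as binary rather than ASCII:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int id = 0;     /* vring/notify id, passed as raw bytes */
            char v;
            int fd = open("/sys/class/remoteproc/remoteproc0/kick", O_WRONLY);

            if (fd < 0)
                    return 1;
            write(fd, &id, sizeof(id));
            close(fd);

            fd = open("/sys/class/remoteproc/remoteproc0/remote_kick", O_RDONLY);
            if (fd < 0)
                    return 1;
            if (read(fd, &v, 1) == 1)
                    printf("remote kicked: %c\n", v);   /* '0' or '1' */
            close(fd);
            return 0;
    }
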
@@ -133,6 +238,41 @@ struct class rproc_class = {
.dev_groups = rproc_devgroups,
};
+/**
+ * rproc_create_kick_sysfs() - create kick remote sysfs entry
+ * @rproc: remoteproc
+ *
+ * It will create kick remote sysfs entry if kick remote
+ * from sysfs is allowed.
+ *
+ * Return: 0 for success, and negative value for failure.
+ */
+int rproc_create_kick_sysfs(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ if (!rproc_allow_sysfs_kick(rproc))
+ return -EINVAL;
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_kick.attr);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs for kick.\n");
+ return ret;
+ }
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_remote_kick.attr);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs for remote kick.\n");
+ return ret;
+ }
+ ret = sysfs_create_file(&dev->kobj,
+ &dev_attr_remote_pending_message.attr);
+ if (ret)
+ dev_err(dev,
+ "failed to create sysfs for remote pending message.\n");
+ return ret;
+}
+EXPORT_SYMBOL(rproc_create_kick_sysfs);
+
int __init rproc_init_sysfs(void)
{
/* create remoteproc device class for sysfs */
diff --git a/drivers/remoteproc/zynq_remoteproc.c b/drivers/remoteproc/zynq_remoteproc.c
new file mode 100644
index 000000000000..fd9c61f77ad7
--- /dev/null
+++ b/drivers/remoteproc/zynq_remoteproc.c
@@ -0,0 +1,479 @@
+/*
+ * Zynq Remote Processor driver
+ *
+ * Copyright (C) 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012 PetaLogix
+ *
+ * Based on origin OMAP Remote Processor driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/smp.h>
+#include <linux/irqchip/arm-gic.h>
+#include <asm/outercache.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/genalloc.h>
+#include <../../arch/arm/mach-zynq/common.h>
+
+#include "remoteproc_internal.h"
+
+#define MAX_NUM_VRINGS 2
+#define NOTIFYID_ANY (-1)
+/* Maximum number of on-chip memories used by the driver */
+#define MAX_ON_CHIP_MEMS 32
+
+/* Structure for storing IRQs */
+struct irq_list {
+ int irq;
+ struct list_head list;
+};
+
+/* Structure for IPIs */
+struct ipi_info {
+ u32 irq;
+ u32 notifyid;
+ bool pending;
+};
+
+/* On-chip memory pool element */
+struct mem_pool_st {
+ struct list_head node;
+ struct gen_pool *pool;
+};
+
+/* Private data */
+struct zynq_rproc_pdata {
+ struct irq_list irqs;
+ struct rproc *rproc;
+ struct ipi_info ipis[MAX_NUM_VRINGS];
+ struct list_head mem_pools;
+ struct list_head mems;
+ u32 mem_start;
+ u32 mem_end;
+};
+
+static bool autoboot __read_mostly;
+
+/* Store rproc for IPI handler */
+static struct rproc *rproc;
+static struct work_struct workqueue;
+
+static void handle_event(struct work_struct *work)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ if (rproc_vq_interrupt(local->rproc, local->ipis[0].notifyid) ==
+ IRQ_NONE)
+ dev_dbg(rproc->dev.parent, "no message found in vqid 0\n");
+}
+
+static void ipi_kick(void)
+{
+ dev_dbg(rproc->dev.parent, "KICK Linux because of pending message\n");
+ schedule_work(&workqueue);
+}
+
+static void kick_pending_ipi(struct rproc *rproc)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+ int i;
+
+ for (i = 0; i < MAX_NUM_VRINGS; i++) {
+ /* Send swirq to firmware */
+ if (local->ipis[i].pending) {
+ gic_raise_softirq(cpumask_of(1),
+ local->ipis[i].irq);
+ local->ipis[i].pending = false;
+ }
+ }
+}
+
+static int zynq_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+ INIT_WORK(&workqueue, handle_event);
+
+ ret = cpu_down(1);
+ /* EBUSY means CPU is already released */
+ if (ret && (ret != -EBUSY)) {
+ dev_err(dev, "Can't release cpu1\n");
+ return ret;
+ }
+
+ ret = zynq_cpun_start(rproc->bootaddr, 1);
+ /* Trigger pending kicks */
+ kick_pending_ipi(rproc);
+
+ return ret;
+}
+
+/* kick the remote firmware */
+static void zynq_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynq_rproc_pdata *local = rproc->priv;
+ struct rproc_vdev *rvdev, *rvtmp;
+ struct fw_rsc_vdev *rsc;
+ int i;
+
+ dev_dbg(dev, "KICK Firmware to start send messages vqid %d\n", vqid);
+
+ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
+ rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
+ for (i = 0; i < MAX_NUM_VRINGS; i++) {
+ struct rproc_vring *rvring = &rvdev->vring[i];
+
+ /* Send swirq to firmware */
+ if (rvring->notifyid == vqid) {
+ local->ipis[i].notifyid = vqid;
+ /* CPU1 is not started until rproc start,
+ * so delay the firmware kick until then
+ */
+ if (rproc->state == RPROC_RUNNING)
+ gic_raise_softirq(cpumask_of(1),
+ local->ipis[i].irq);
+ else
+ local->ipis[i].pending = true;
+ }
+ }
+
+ }
+}
+
+/* power off the remote processor */
+static int zynq_rproc_stop(struct rproc *rproc)
+{
+ int ret;
+ struct device *dev = rproc->dev.parent;
+
+ dev_dbg(rproc->dev.parent, "%s\n", __func__);
+
+ /* The CPU may fail to come back online - for example in nosmp mode */
+ ret = cpu_up(1);
+ if (ret)
+ dev_err(dev, "Can't power on cpu1 %d\n", ret);
+
+ return 0;
+}
+
+static void *zynq_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct rproc_mem_entry *mem;
+ void *va = NULL;
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ list_for_each_entry(mem, &local->mems, node) {
+ int offset = da - mem->da;
+
+ /* try next carveout if da is too small */
+ if (offset < 0)
+ continue;
+
+ /* try next carveout if da is too large */
+ if (offset + len > mem->len)
+ continue;
+
+ va = mem->va + offset;
+
+ break;
+ }
+ return va;
+}
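The lookup above is plain carveout arithmetic: va = mem->va + (da - mem->da), valid only while the whole [da, da + len) window fits inside a single carveout. A standalone sketch of the same bounds check, with illustrative sample values:

#include <assert.h>
#include <stdint.h>

/* Mirrors the carveout lookup: va = base + (da - mem_da), valid only
 * when 0 <= offset and offset + len <= mem_len.
 */
static void *translate(uint8_t *va_base, uint32_t mem_da, uint32_t mem_len,
		       uint32_t da, uint32_t len)
{
	int64_t offset = (int64_t)da - mem_da;

	if (offset < 0 || offset + len > mem_len)
		return NULL;	/* da not inside this carveout */
	return va_base + offset;
}

int main(void)
{
	static uint8_t pool[0x10000];

	/* carveout: device address 0x20000, 64 KiB long */
	assert(translate(pool, 0x20000, 0x10000, 0x20100, 16) == pool + 0x100);
	assert(translate(pool, 0x20000, 0x10000, 0x1ffff, 16) == NULL);
	return 0;
}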
+
+static struct rproc_ops zynq_rproc_ops = {
+ .start = zynq_rproc_start,
+ .stop = zynq_rproc_stop,
+ .kick = zynq_rproc_kick,
+ .da_to_va = zynq_rproc_da_to_va,
+};
+
+/* Just to detect a bug if interrupt forwarding is broken */
+static irqreturn_t zynq_remoteproc_interrupt(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+
+ dev_err(dev, "GIC IRQ %d is not forwarded correctly\n", irq);
+
+ /*
+ * MS: Reaching this handler does not have to be a BUG,
+ * especially for cases where the firmware doesn't disable
+ * interrupts; some interrupts can still be pending at the
+ * next probe. Another scenario is monitoring an infrequent
+ * interrupt through the Linux kernel: the interrupt happens
+ * and is forwarded to Linux, which updates its own statistics
+ * (/proc/interrupts) and forwards it to the firmware.
+ *
+ * gic_set_cpu(1, irq); - setup cpu1 as destination cpu
+ * gic_raise_softirq(cpumask_of(1), irq); - forward irq to firmware
+ */
+
+ gic_set_cpu(1, irq);
+ return IRQ_HANDLED;
+}
+
+static void clear_irq(struct rproc *rproc)
+{
+ struct list_head *pos, *q;
+ struct irq_list *tmp;
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ dev_info(rproc->dev.parent, "Deleting the irq_list\n");
+ list_for_each_safe(pos, q, &local->irqs.list) {
+ tmp = list_entry(pos, struct irq_list, list);
+ free_irq(tmp->irq, rproc->dev.parent);
+ gic_set_cpu(0, tmp->irq);
+ list_del(pos);
+ kfree(tmp);
+ }
+}
+
+static int zynq_rproc_add_mems(struct zynq_rproc_pdata *pdata)
+{
+ struct mem_pool_st *mem_node;
+ size_t mem_size;
+ struct gen_pool *mem_pool;
+ struct rproc_mem_entry *mem;
+ dma_addr_t dma;
+ void *va;
+ struct device *dev = pdata->rproc->dev.parent;
+
+ list_for_each_entry(mem_node, &pdata->mem_pools, node) {
+ mem_pool = mem_node->pool;
+ mem_size = gen_pool_size(mem_pool);
+ mem = devm_kzalloc(dev, sizeof(struct rproc_mem_entry),
+ GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ va = gen_pool_dma_alloc(mem_pool, mem_size, &dma);
+ if (!va) {
+ dev_err(dev, "Failed to allocate dma carveout mem.\n");
+ return -ENOMEM;
+ }
+ mem->priv = (void *)mem_pool;
+ mem->va = va;
+ mem->len = mem_size;
+ mem->dma = dma;
+ mem->da = dma;
+ dev_dbg(dev, "%s: va = %p, da = 0x%x dma = 0x%x\n",
+ __func__, va, mem->da, mem->dma);
+ list_add_tail(&mem->node, &pdata->mems);
+ }
+ return 0;
+}
+
+static int zynq_remoteproc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct irq_list *tmp;
+ int count = 0;
+ struct zynq_rproc_pdata *local;
+ struct gen_pool *mem_pool = NULL;
+ struct mem_pool_st *mem_node = NULL;
+ int i;
+
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
+ &zynq_rproc_ops, NULL,
+ sizeof(struct zynq_rproc_pdata));
+ if (!rproc) {
+ dev_err(&pdev->dev, "rproc allocation failed\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ local = rproc->priv;
+ local->rproc = rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+ goto dma_mask_fault;
+ }
+
+ /* Init list for IRQs - it can be a long list */
+ INIT_LIST_HEAD(&local->irqs.list);
+
+ /* Allocate IRQs from the DT to be sure that no other driver will use them */
+ while (1) {
+ int irq;
+
+ irq = platform_get_irq(pdev, count++);
+ if (irq == -ENXIO || irq == -EINVAL)
+ break;
+
+ tmp = kzalloc(sizeof(struct irq_list), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto irq_fault;
+ }
+
+ tmp->irq = irq;
+
+ dev_dbg(&pdev->dev, "%d: Alloc irq: %d\n", count, tmp->irq);
+
+ /* Requesting the IRQs here ensures that no other
+ * module can claim them
+ */
+ ret = request_irq(tmp->irq, zynq_remoteproc_interrupt, 0,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ %d already allocated\n",
+ tmp->irq);
+ kfree(tmp);
+ goto irq_fault;
+ }
+
+ /*
+ * MS: Here is place for detecting problem with firmware
+ * which doesn't work correctly with interrupts
+ *
+ * MS: Comment if you want to count IRQs on Linux
+ */
+ gic_set_cpu(1, tmp->irq);
+ list_add(&(tmp->list), &(local->irqs.list));
+ }
+
+ /* Read vring0 IPI number */
+ ret = of_property_read_u32(pdev->dev.of_node, "vring0",
+ &local->ipis[0].irq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ goto irq_fault;
+ }
+
+ ret = set_ipi_handler(local->ipis[0].irq, ipi_kick,
+ "Firmware kick");
+ if (ret) {
+ dev_err(&pdev->dev, "IPI handler already registered\n");
+ goto irq_fault;
+ }
+
+ /* Read vring1 IPI number */
+ ret = of_property_read_u32(pdev->dev.of_node, "vring1",
+ &local->ipis[1].irq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ goto ipi_fault;
+ }
+
+ /* Find on-chip memory */
+ INIT_LIST_HEAD(&local->mem_pools);
+ INIT_LIST_HEAD(&local->mems);
+ for (i = 0; ; i++) {
+ char *srams_name = "srams";
+
+ mem_pool = of_gen_pool_get(pdev->dev.of_node,
+ srams_name, i);
+ if (mem_pool) {
+ mem_node = devm_kzalloc(&pdev->dev,
+ sizeof(struct mem_pool_st),
+ GFP_KERNEL);
+ if (!mem_node) {
+ ret = -ENOMEM;
+ goto ipi_fault;
+ }
+ mem_node->pool = mem_pool;
+ list_add_tail(&mem_node->node, &local->mem_pools);
+ } else {
+ break;
+ }
+ }
+ ret = zynq_rproc_add_mems(local);
+ if (ret) {
+ dev_err(&pdev->dev, "rproc failed to add mems\n");
+ goto ipi_fault;
+ }
+
+ rproc->auto_boot = autoboot;
+
+ ret = rproc_add(local->rproc);
+ if (ret) {
+ dev_err(&pdev->dev, "rproc registration failed\n");
+ goto ipi_fault;
+ }
+
+ return 0;
+
+ipi_fault:
+ clear_ipi_handler(local->ipis[0].irq);
+
+irq_fault:
+ clear_irq(rproc);
+
+dma_mask_fault:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int zynq_remoteproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct zynq_rproc_pdata *local = rproc->priv;
+ struct rproc_mem_entry *mem;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ rproc_del(rproc);
+
+ clear_ipi_handler(local->ipis[0].irq);
+ clear_irq(rproc);
+
+ list_for_each_entry(mem, &local->mems, node) {
+ if (mem->priv)
+ gen_pool_free((struct gen_pool *)mem->priv,
+ (unsigned long)mem->va, mem->len);
+ }
+
+ rproc_free(rproc);
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static const struct of_device_id zynq_remoteproc_match[] = {
+ { .compatible = "xlnx,zynq_remoteproc", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, zynq_remoteproc_match);
+
+static struct platform_driver zynq_remoteproc_driver = {
+ .probe = zynq_remoteproc_probe,
+ .remove = zynq_remoteproc_remove,
+ .driver = {
+ .name = "zynq_remoteproc",
+ .of_match_table = zynq_remoteproc_match,
+ },
+};
+module_platform_driver(zynq_remoteproc_driver);
+
+module_param_named(autoboot, autoboot, bool, 0444);
+MODULE_PARM_DESC(autoboot,
+ "enable | disable autoboot. (default: false)");
+
+MODULE_AUTHOR("Michal Simek <monstr@monstr.eu");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Zynq remote processor control driver");
diff --git a/drivers/remoteproc/zynqmp_r5_remoteproc.c b/drivers/remoteproc/zynqmp_r5_remoteproc.c
new file mode 100644
index 000000000000..5df4e512865e
--- /dev/null
+++ b/drivers/remoteproc/zynqmp_r5_remoteproc.c
@@ -0,0 +1,966 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Zynq R5 Remote Processor driver
+ *
+ * Copyright (C) 2015 - 2018 Xilinx Inc.
+ * Copyright (C) 2015 Jason Wu <j.wu@xilinx.com>
+ *
+ * Based on the original OMAP and Zynq Remote Processor drivers
+ *
+ * Copyright (C) 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012 PetaLogix
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/atomic.h>
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/genalloc.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "remoteproc_internal.h"
+
+#define MAX_RPROCS 2 /* Support up to 2 RPUs */
+#define MAX_MEM_PNODES 4 /* Max power nodes for one RPU memory instance */
+
+#define DEFAULT_FIRMWARE_NAME "rproc-rpu-fw"
+
+/* PM proc states */
+#define PM_PROC_STATE_ACTIVE 1U
+
+/* RX mailbox client buffer max length */
+#define RX_MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
+ sizeof(struct zynqmp_ipi_message))
+
+static bool autoboot __read_mostly;
+static bool allow_sysfs_kick __read_mostly;
+
+struct zynqmp_rpu_domain_pdata;
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct zynqmp_r5_mem - zynqmp rpu memory data
+ * @pnode_id: TCM power domain ids
+ * @res: memory resource
+ * @node: list node
+ */
+struct zynqmp_r5_mem {
+ u32 pnode_id[MAX_MEM_PNODES];
+ struct resource res;
+ struct list_head node;
+};
+
+/**
+ * struct zynqmp_r5_pdata - zynqmp rpu remote processor private data
+ * @dev: device of RPU instance
+ * @rproc: rproc handle
+ * @parent: RPU slot platform data
+ * @pnode_id: RPU CPU power domain id
+ * @mems: memory resources
+ * @is_r5_mode_set: indicate if r5 operation mode is set
+ * @tx_mc: tx mailbox client
+ * @rx_mc: rx mailbox client
+ * @tx_chan: tx mailbox channel
+ * @rx_chan: rx mailbox channel
+ * @workqueue: workqueue for the RPU remoteproc
+ * @tx_mc_skbs: socket buffers for tx mailbox client
+ * @rx_mc_buf: rx mailbox client buffer to save the rx message
+ * @remote_kick: flag to indicate if there is a kick from remote
+ */
+struct zynqmp_r5_pdata {
+ struct device dev;
+ struct rproc *rproc;
+ struct zynqmp_rpu_domain_pdata *parent;
+ u32 pnode_id;
+ struct list_head mems;
+ bool is_r5_mode_set;
+ struct mbox_client tx_mc;
+ struct mbox_client rx_mc;
+ struct mbox_chan *tx_chan;
+ struct mbox_chan *rx_chan;
+ struct work_struct workqueue;
+ struct sk_buff_head tx_mc_skbs;
+ unsigned char rx_mc_buf[RX_MBOX_CLIENT_BUF_MAX];
+ atomic_t remote_kick;
+};
+
+/**
+ * struct zynqmp_rpu_domain_pdata - zynqmp rpu platform data
+ * @rpus: table of RPUs
+ * @rpu_mode: RPU core configuration
+ */
+struct zynqmp_rpu_domain_pdata {
+ struct zynqmp_r5_pdata rpus[MAX_RPROCS];
+ enum rpu_oper_mode rpu_mode;
+};
+
+/**
+ * r5_set_mode - set RPU operation mode
+ * @pdata: Remote processor private data
+ *
+ * Set the RPU operation mode.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int r5_set_mode(struct zynqmp_r5_pdata *pdata)
+{
+ u32 val[PAYLOAD_ARG_CNT] = {0}, expect;
+ struct zynqmp_rpu_domain_pdata *parent;
+ struct device *dev = &pdata->dev;
+ int ret;
+
+ if (pdata->is_r5_mode_set)
+ return 0;
+ parent = pdata->parent;
+ expect = (u32)parent->rpu_mode;
+ ret = eemi_ops->ioctl(pdata->pnode_id, IOCTL_GET_RPU_OPER_MODE,
+ 0, 0, val);
+ if (ret < 0) {
+ dev_err(dev, "failed to get RPU oper mode.\n");
+ return ret;
+ }
+ if (val[0] == expect) {
+ dev_dbg(dev, "RPU mode matches: %x\n", val[0]);
+ } else {
+ ret = eemi_ops->ioctl(pdata->pnode_id,
+ IOCTL_SET_RPU_OPER_MODE,
+ expect, 0, val);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to set RPU oper mode.\n");
+ return ret;
+ }
+ }
+ if (expect == (u32)PM_RPU_MODE_LOCKSTEP)
+ expect = (u32)PM_RPU_TCM_COMB;
+ else
+ expect = (u32)PM_RPU_TCM_SPLIT;
+ ret = eemi_ops->ioctl(pdata->pnode_id, IOCTL_TCM_COMB_CONFIG,
+ expect, 0, val);
+ if (ret < 0) {
+ dev_err(dev, "failed to config TCM to %x.\n",
+ expect);
+ return ret;
+ }
+ pdata->is_r5_mode_set = true;
+ return 0;
+}
+
+/**
+ * r5_is_running - check if r5 is running
+ * @pdata: Remote processor private data
+ *
+ * check if R5 is running
+ *
+ * Return: true if r5 is running, false otherwise
+ */
+static bool r5_is_running(struct zynqmp_r5_pdata *pdata)
+{
+ u32 status, requirements, usage;
+ struct device *dev = &pdata->dev;
+
+ if (eemi_ops->get_node_status(pdata->pnode_id,
+ &status, &requirements, &usage)) {
+ dev_err(dev, "Failed to get RPU node %d status.\n",
+ pdata->pnode_id);
+ return false;
+ } else if (status != PM_PROC_STATE_ACTIVE) {
+ dev_dbg(dev, "RPU is not running.\n");
+ return false;
+ }
+
+ dev_dbg(dev, "RPU is running.\n");
+ return true;
+}
+
+/**
+ * r5_request_mem - request RPU memory
+ * @rproc: pointer to remoteproc instance
+ * @mem: pointer to RPU memory
+ *
+ * Request RPU memory resource to make it accessible by the kernel.
+ *
+ * Return: 0 if success, negative value for failure.
+ */
+static int r5_request_mem(struct rproc *rproc, struct zynqmp_r5_mem *mem)
+{
+ int i, ret;
+ struct device *dev = &rproc->dev;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ for (i = 0; i < MAX_MEM_PNODES; i++) {
+ if (mem->pnode_id[i]) {
+ ret = eemi_ops->request_node(mem->pnode_id[i],
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING
+ );
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to request power node: %u\n",
+ mem->pnode_id[i]);
+ return ret;
+ }
+ } else {
+ break;
+ }
+ }
+
+ ret = r5_set_mode(local);
+ if (ret < 0) {
+ dev_err(dev, "failed to set R5 operation mode.\n");
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * ZynqMP R5 remoteproc memory release function
+ */
+static int zynqmp_r5_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct zynqmp_r5_mem *priv;
+ int i, ret;
+ struct device *dev = &rproc->dev;
+
+ priv = mem->priv;
+ if (!priv)
+ return 0;
+ for (i = 0; i < MAX_MEM_PNODES; i++) {
+ if (priv->pnode_id[i]) {
+ dev_dbg(dev, "%s, pnode %d\n",
+ __func__, priv->pnode_id[i]);
+ ret = eemi_ops->release_node(priv->pnode_id[i]);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to release power node: %u\n",
+ priv->pnode_id[i]);
+ return ret;
+ }
+ } else {
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * ZynqMP R5 remoteproc operations
+ */
+static int zynqmp_r5_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+ enum rpu_boot_mem bootmem;
+ int ret;
+
+ /* Set up R5 */
+ ret = r5_set_mode(local);
+ if (ret) {
+ dev_err(dev, "failed to set R5 operation mode.\n");
+ return ret;
+ }
+ if ((rproc->bootaddr & 0xF0000000) == 0xF0000000)
+ bootmem = PM_RPU_BOOTMEM_HIVEC;
+ else
+ bootmem = PM_RPU_BOOTMEM_LOVEC;
+ dev_info(dev, "RPU boot from %s.\n",
+ bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
+
+ ret = eemi_ops->request_wakeup(local->pnode_id, 1, bootmem,
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ if (ret < 0) {
+ dev_err(dev, "failed to boot R5.\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int zynqmp_r5_rproc_stop(struct rproc *rproc)
+{
+ struct zynqmp_r5_pdata *local = rproc->priv;
+ int ret;
+
+ ret = eemi_ops->force_powerdown(local->pnode_id,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0) {
+ dev_err(&local->dev, "failed to shutdown R5.\n");
+ return ret;
+ }
+ local->is_r5_mode_set = false;
+ return 0;
+}
+
+static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
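+ /* a firmware image without a resource table is still valid */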
+ if (ret == -EINVAL)
+ ret = 0;
+ return ret;
+}
+
+static void *zynqmp_r5_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct zynqmp_r5_pdata *local = rproc->priv;
+ struct zynqmp_r5_mem *mem;
+ struct device *dev;
+
+ dev = &local->dev;
+ list_for_each_entry(mem, &local->mems, node) {
+ struct rproc_mem_entry *rproc_mem;
+ struct resource *res = &mem->res;
+ u64 res_da = (u64)res->start;
+ resource_size_t size;
+ int offset, ret;
+ void *va;
+ dma_addr_t dma;
+
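+ /* fold global TCM aliases (0xffexxxxx) into the R5 local view */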
+ if ((res_da & 0xfff00000) == 0xffe00000) {
+ res_da &= 0x000fffff;
+ if (res_da & 0x80000)
+ res_da -= 0x90000;
+ }
+
+ offset = (int)(da - res_da);
+ if (offset < 0)
+ continue;
+ size = resource_size(res);
+ if (offset + len > (int)size)
+ continue;
+
+ ret = r5_request_mem(rproc, mem);
+ if (ret < 0) {
+ dev_err(dev, "failed to request memory %pad.\n",
+ &res->start);
+ return NULL;
+ }
+
+ va = devm_ioremap_wc(dev, res->start, size);
+ dma = (dma_addr_t)res->start;
+ da = (u32)res_da;
+ rproc_mem = rproc_mem_entry_init(dev, va, dma, (int)size, da,
+ NULL, zynqmp_r5_mem_release,
+ res->name);
+ if (!rproc_mem)
+ return NULL;
+ rproc_mem->priv = mem;
+ dev_dbg(dev, "%s: %s, va = %p, da = 0x%x dma = 0x%llx\n",
+ __func__, rproc_mem->name, rproc_mem->va,
+ rproc_mem->da, rproc_mem->dma);
+ rproc_add_carveout(rproc, rproc_mem);
+ return (char *)va + offset;
+ }
+ return NULL;
+}
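The 0xffexxxxx fold above remaps the global TCM aliases onto the R5-local addresses the firmware is linked against. A standalone check of that arithmetic; the bank names in the comments are an assumption based on the usual ZynqMP TCM layout:

#include <assert.h>
#include <stdint.h>

/* Fold a global TCM bus address into the R5-local device address,
 * mirroring the check in zynqmp_r5_da_to_va().
 */
static uint64_t tcm_global_to_local(uint64_t res_da)
{
	if ((res_da & 0xfff00000) == 0xffe00000) {
		res_da &= 0x000fffff;
		/* the second RPU's banks sit at +0x90000 in the global map */
		if (res_da & 0x80000)
			res_da -= 0x90000;
	}
	return res_da;
}

int main(void)
{
	assert(tcm_global_to_local(0xffe00000) == 0x00000); /* R5_0 ATCM */
	assert(tcm_global_to_local(0xffe20000) == 0x20000); /* R5_0 BTCM */
	assert(tcm_global_to_local(0xffe90000) == 0x00000); /* R5_1 ATCM */
	assert(tcm_global_to_local(0xffeb0000) == 0x20000); /* R5_1 BTCM */
	return 0;
}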
+
+/* kick a firmware */
+static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ dev_dbg(dev, "KICK Firmware to start send messages vqid %d\n", vqid);
+
+ if (vqid < 0) {
+ /* If vqid is negative, do not pass it to the
+ * mailbox, as vqid is supposed to be 0 or positive.
+ * This also gives a way to just kick the remote
+ * without using the IPI buffer. A proper way to
+ * pass short messages still needs to be synced
+ * upstream; for now, a negative vqid means no
+ * message is passed through the IPI buffer, only
+ * the interrupt is raised. This is faster as the
+ * message doesn't need to be copied to the IPI
+ * buffer.
+ *
+ * The return value is ignored, as a failure only
+ * means kicks are already queued in the mailbox.
+ */
+ (void)mbox_send_message(local->tx_chan, NULL);
+ } else {
+ struct sk_buff *skb;
+ unsigned int skb_len;
+ struct zynqmp_ipi_message *mb_msg;
+ int ret;
+
+ skb_len = (unsigned int)(sizeof(vqid) + sizeof(*mb_msg));
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(dev,
+ "Failed to allocate skb to kick remote.\n");
+ return;
+ }
+ mb_msg = (struct zynqmp_ipi_message *)skb_put(skb, skb_len);
+ mb_msg->len = sizeof(vqid);
+ memcpy(mb_msg->data, &vqid, sizeof(vqid));
+ skb_queue_tail(&local->tx_mc_skbs, skb);
+ ret = mbox_send_message(local->tx_chan, mb_msg);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to kick remote.\n");
+ skb_dequeue_tail(&local->tx_mc_skbs);
+ kfree_skb(skb);
+ }
+ }
+}
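Both flavours hang off the same ops hook, so an in-kernel caller can pick between a bare interrupt and a buffered notify id. A hypothetical caller sketch (illustrative only, not part of the patch):

#include <linux/remoteproc.h>

/* Illustrative only: contrast the two kick flavours. */
static void example_kick(struct rproc *rproc)
{
	/* negative vqid: raise the IPI without touching the IPI buffer */
	rproc->ops->kick(rproc, -1);

	/* vqid >= 0: the notify id travels in the IPI buffer */
	rproc->ops->kick(rproc, 0);
}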
+
+static bool zynqmp_r5_rproc_peek_remote_kick(struct rproc *rproc,
+ char *buf, size_t *len)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ dev_dbg(dev, "Peek if remote has kicked\n");
+
+ if (atomic_read(&local->remote_kick) != 0) {
+ if (buf && len) {
+ struct zynqmp_ipi_message *msg;
+
+ msg = (struct zynqmp_ipi_message *)local->rx_mc_buf;
+ memcpy(buf, msg->data, msg->len);
+ *len = (size_t)msg->len;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static void zynqmp_r5_rproc_ack_remote_kick(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynqmp_r5_pdata *local = rproc->priv;
+
+ dev_dbg(dev, "Ack remote\n");
+
+ atomic_set(&local->remote_kick, 0);
+ (void)mbox_send_message(local->rx_chan, NULL);
+}
+
+static struct rproc_ops zynqmp_r5_rproc_ops = {
+ .start = zynqmp_r5_rproc_start,
+ .stop = zynqmp_r5_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = zynqmp_r5_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .da_to_va = zynqmp_r5_da_to_va,
+ .kick = zynqmp_r5_rproc_kick,
+ .peek_remote_kick = zynqmp_r5_rproc_peek_remote_kick,
+ .ack_remote_kick = zynqmp_r5_rproc_ack_remote_kick,
+};
+
+/**
+ * zynqmp_r5_get_reserved_mems() - get reserved memories
+ * @pdata: pointer to the RPU remoteproc private data
+ *
+ * Function to retrieve the memory resources from the memory-region
+ * property.
+ */
+static int zynqmp_r5_get_reserved_mems(struct zynqmp_r5_pdata *pdata)
+{
+ struct device *dev = &pdata->dev;
+ struct device_node *np = dev->of_node;
+ int num_mems;
+ int i;
+
+ num_mems = of_count_phandle_with_args(np, "memory-region", NULL);
+ if (num_mems <= 0)
+ return 0;
+ for (i = 0; i < num_mems; i++) {
+ struct device_node *node;
+ struct zynqmp_r5_mem *mem;
+ int ret;
+
+ node = of_parse_phandle(np, "memory-region", i);
+ ret = of_device_is_compatible(node, "shared-dma-pool");
+ if (ret) {
+ /* it is DMA memory. */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, i);
+ if (ret) {
+ dev_err(dev, "unable to reserve DMA mem.\n");
+ return ret;
+ }
+ dev_dbg(dev, "%s, dma memory %s.\n",
+ __func__, of_node_full_name(node));
+ continue;
+ }
+ /*
+ * It is non-DMA memory, used for firmware loading.
+ * It will be added to the R5 remoteproc mappings later.
+ */
+ mem = devm_kzalloc(dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ ret = of_address_to_resource(node, 0, &mem->res);
+ if (ret) {
+ dev_err(dev, "unable to resolve memory region.\n");
+ return ret;
+ }
+ list_add_tail(&mem->node, &pdata->mems);
+ dev_dbg(dev, "%s, non-dma mem %s\n",
+ __func__, of_node_full_name(node));
+ }
+ return 0;
+}
+
+/**
+ * zynqmp_r5_mem_probe() - probes RPU TCM memory device
+ * @pdata: pointer to the RPU remoteproc private data
+ * @node: pointer to the memory node
+ *
+ * Function to retrieve the memory resources for the RPU TCM memory device.
+ */
+static int zynqmp_r5_mem_probe(struct zynqmp_r5_pdata *pdata,
+ struct device_node *node)
+{
+ struct device *dev;
+ struct zynqmp_r5_mem *mem;
+ int ret;
+
+ dev = &pdata->dev;
+ mem = devm_kzalloc(dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ ret = of_address_to_resource(node, 0, &mem->res);
+ if (ret < 0) {
+ dev_err(dev, "failed to get resource of memory %s",
+ of_node_full_name(node));
+ return -EINVAL;
+ }
+
+ /* Get the power domain id */
+ if (of_find_property(node, "pnode-id", NULL)) {
+ struct property *prop;
+ const __be32 *cur;
+ u32 val;
+ int i = 0;
+
+ of_property_for_each_u32(node, "pnode-id", prop, cur, val)
+ mem->pnode_id[i++] = val;
+ }
+ list_add_tail(&mem->node, &pdata->mems);
+ return 0;
+}
+
+/**
+ * zynqmp_r5_release() - ZynqMP R5 device release function
+ * @dev: pointer to the device struct of ZynqMP R5
+ *
+ * Function to release ZynqMP R5 device.
+ */
+static void zynqmp_r5_release(struct device *dev)
+{
+ struct zynqmp_r5_pdata *pdata;
+ struct rproc *rproc;
+ struct sk_buff *skb;
+
+ pdata = dev_get_drvdata(dev);
+ rproc = pdata->rproc;
+ if (rproc) {
+ rproc_del(rproc);
+ rproc_free(rproc);
+ }
+ if (pdata->tx_chan)
+ mbox_free_channel(pdata->tx_chan);
+ if (pdata->rx_chan)
+ mbox_free_channel(pdata->rx_chan);
+ /* Discard all SKBs */
+ while (!skb_queue_empty(&pdata->tx_mc_skbs)) {
+ skb = skb_dequeue(&pdata->tx_mc_skbs);
+ kfree_skb(skb);
+ }
+
+ put_device(dev->parent);
+}
+
+/**
+ * event_notified_idr_cb() - event notified idr callback
+ * @id: idr id
+ * @ptr: pointer to idr private data
+ * @data: data passed to idr_for_each callback
+ *
+ * Pass the notification to the remoteproc virtio layer.
+ *
+ * Return: 0, to satisfy the idr_for_each() callback prototype.
+ */
+static int event_notified_idr_cb(int id, void *ptr, void *data)
+{
+ struct rproc *rproc = data;
+
+ (void)rproc_vq_interrupt(rproc, id);
+ return 0;
+}
+
+/**
+ * handle_event_notified() - remoteproc notification work function
+ * @work: pointer to the work structure
+ *
+ * It checks each registered remoteproc notify ID.
+ */
+static void handle_event_notified(struct work_struct *work)
+{
+ struct rproc *rproc;
+ struct zynqmp_r5_pdata *local;
+
+ local = container_of(work, struct zynqmp_r5_pdata, workqueue);
+
+ (void)mbox_send_message(local->rx_chan, NULL);
+ rproc = local->rproc;
+ if (rproc->sysfs_kick) {
+ sysfs_notify(&rproc->dev.kobj, NULL, "remote_kick");
+ return;
+ }
+ /*
+ * We only use the IPI as an interrupt. The firmware side may
+ * or may not write the notifyid when it triggers the IPI,
+ * so scan through all the registered notifyids.
+ */
+ idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
+}
+
+/**
+ * zynqmp_r5_mb_rx_cb() - Receive channel mailbox callback
+ * @cl: mailbox client
+ * @mssg: message pointer
+ *
+ * It will schedule the R5 notification work.
+ */
+static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *mssg)
+{
+ struct zynqmp_r5_pdata *local;
+
+ local = container_of(cl, struct zynqmp_r5_pdata, rx_mc);
+ if (mssg) {
+ struct zynqmp_ipi_message *ipi_msg, *buf_msg;
+ size_t len;
+
+ ipi_msg = (struct zynqmp_ipi_message *)mssg;
+ buf_msg = (struct zynqmp_ipi_message *)local->rx_mc_buf;
+ len = (ipi_msg->len >= IPI_BUF_LEN_MAX) ?
+ IPI_BUF_LEN_MAX : ipi_msg->len;
+ buf_msg->len = len;
+ memcpy(buf_msg->data, ipi_msg->data, len);
+ }
+ atomic_set(&local->remote_kick, 1);
+ schedule_work(&local->workqueue);
+}
+
+/**
+ * zynqmp_r5_mb_tx_done() - Request has been sent to the remote
+ * @cl: mailbox client
+ * @mssg: pointer to the message which has been sent
+ * @r: status of last TX - OK or error
+ *
+ * It will be called by the mailbox framework when the last TX has completed.
+ */
+static void zynqmp_r5_mb_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct zynqmp_r5_pdata *local;
+ struct sk_buff *skb;
+
+ if (!mssg)
+ return;
+ local = container_of(cl, struct zynqmp_r5_pdata, tx_mc);
+ skb = skb_dequeue(&local->tx_mc_skbs);
+ kfree_skb(skb);
+}
+
+/**
+ * zynqmp_r5_setup_mbox() - Setup mailboxes
+ *
+ * @pdata: pointer to the ZynqMP R5 processor platform data
+ * @node: pointer of the device node
+ *
+ * Function to setup mailboxes to talk to the RPU.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_setup_mbox(struct zynqmp_r5_pdata *pdata,
+ struct device_node *node)
+{
+ struct device *dev = &pdata->dev;
+ struct mbox_client *mclient;
+
+ /* Setup TX mailbox channel client */
+ mclient = &pdata->tx_mc;
+ mclient->dev = dev;
+ mclient->rx_callback = NULL;
+ mclient->tx_block = false;
+ mclient->knows_txdone = false;
+ mclient->tx_done = zynqmp_r5_mb_tx_done;
+
+ /* Setup RX mailbox channel client */
+ mclient = &pdata->rx_mc;
+ mclient->dev = dev;
+ mclient->rx_callback = zynqmp_r5_mb_rx_cb;
+ mclient->tx_block = false;
+ mclient->knows_txdone = false;
+
+ INIT_WORK(&pdata->workqueue, handle_event_notified);
+
+ atomic_set(&pdata->remote_kick, 0);
+ /* Request TX and RX channels */
+ pdata->tx_chan = mbox_request_channel_byname(&pdata->tx_mc, "tx");
+ if (IS_ERR(pdata->tx_chan)) {
+ dev_err(dev, "failed to request mbox tx channel.\n");
+ pdata->tx_chan = NULL;
+ return -EINVAL;
+ }
+ pdata->rx_chan = mbox_request_channel_byname(&pdata->rx_mc, "rx");
+ if (IS_ERR(pdata->rx_chan)) {
+ dev_err(dev, "failed to request mbox rx channel.\n");
+ pdata->rx_chan = NULL;
+ return -EINVAL;
+ }
+ skb_queue_head_init(&pdata->tx_mc_skbs);
+ return 0;
+}
+
+/**
+ * zynqmp_r5_probe() - Probes ZynqMP R5 processor device node
+ * @pdata: pointer to the ZynqMP R5 processor platform data
+ * @pdev: parent RPU domain platform device
+ * @node: pointer of the device node
+ *
+ * Function to retrieve the information of the ZynqMP R5 device node.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_probe(struct zynqmp_r5_pdata *pdata,
+ struct platform_device *pdev,
+ struct device_node *node)
+{
+ struct device *dev = &pdata->dev;
+ struct rproc *rproc;
+ struct device_node *nc;
+ int ret;
+
+ /* Create device for ZynqMP R5 device */
+ dev->parent = &pdev->dev;
+ dev->release = zynqmp_r5_release;
+ dev->of_node = node;
+ dev_set_name(dev, "%s", of_node_full_name(node));
+ dev_set_drvdata(dev, pdata);
+ ret = device_register(dev);
+ if (ret) {
+ dev_err(dev, "failed to register device.\n");
+ return ret;
+ }
+ get_device(&pdev->dev);
+
+ /* Allocate remoteproc instance */
+ rproc = rproc_alloc(dev, dev_name(dev), &zynqmp_r5_rproc_ops, NULL, 0);
+ if (!rproc) {
+ dev_err(dev, "rproc allocation failed.\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ rproc->auto_boot = autoboot;
+ pdata->rproc = rproc;
+ rproc->priv = pdata;
+
+ /* Probe R5 memory devices */
+ INIT_LIST_HEAD(&pdata->mems);
+ for_each_available_child_of_node(node, nc) {
+ ret = zynqmp_r5_mem_probe(pdata, nc);
+ if (ret) {
+ dev_err(dev, "failed to probe memory %s.\n",
+ of_node_full_name(nc));
+ goto error;
+ }
+ }
+
+ /* Probe reserved system memories used by R5 */
+ ret = zynqmp_r5_get_reserved_mems(pdata);
+ if (ret) {
+ dev_err(dev, "failed to get reserved memory.\n");
+ goto error;
+ }
+
+ /* Set up DMA mask */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "dma_set_coherent_mask failed: %d\n", ret);
+ /* If DMA is not configured yet, try to configure it. */
+ ret = of_dma_configure(dev, node, true);
+ if (ret) {
+ dev_err(dev, "failed to configure DMA.\n");
+ goto error;
+ }
+ }
+
+ /* Get R5 power domain node */
+ ret = of_property_read_u32(node, "pnode-id", &pdata->pnode_id);
+ if (ret) {
+ dev_err(dev, "failed to get power node id.\n");
+ goto error;
+ }
+
+ /* Check if R5 is running */
+ if (r5_is_running(pdata)) {
+ atomic_inc(&rproc->power);
+ rproc->state = RPROC_RUNNING;
+ }
+
+ if (!of_get_property(dev->of_node, "mboxes", NULL)) {
+ dev_info(dev, "no mailboxes.\n");
+ } else {
+ ret = zynqmp_r5_setup_mbox(pdata, node);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Add R5 remoteproc */
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc registration failed\n");
+ goto error;
+ }
+
+ if (allow_sysfs_kick) {
+ dev_info(dev, "Trying to create remote sysfs entry.\n");
+ rproc->sysfs_kick = 1;
+ (void)rproc_create_kick_sysfs(rproc);
+ }
+
+ return 0;
+error:
+ if (pdata->rproc)
+ rproc_free(pdata->rproc);
+ pdata->rproc = NULL;
+ device_unregister(dev);
+ put_device(&pdev->dev);
+ return ret;
+}
+
+static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
+{
+ const unsigned char *prop;
+ int ret = 0, i;
+ struct zynqmp_rpu_domain_pdata *local;
+ struct device *dev = &pdev->dev;
+ struct device_node *nc;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ local = devm_kzalloc(dev, sizeof(*local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, local);
+
+ prop = of_get_property(dev->of_node, "core_conf", NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "core_conf is not used.\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "RPU core_conf: %s\n", prop);
+ if (!strcmp(prop, "split")) {
+ local->rpu_mode = PM_RPU_MODE_SPLIT;
+ } else if (!strcmp(prop, "lockstep")) {
+ local->rpu_mode = PM_RPU_MODE_LOCKSTEP;
+ } else {
+ dev_err(dev,
+ "Invalid core_conf mode provided - %s, %d\n",
+ prop, (int)local->rpu_mode);
+ return -EINVAL;
+ }
+
+ i = 0;
+ for_each_available_child_of_node(dev->of_node, nc) {
+ local->rpus[i].parent = local;
+ ret = zynqmp_r5_probe(&local->rpus[i], pdev, nc);
+ if (ret) {
+ dev_err(dev, "failed to probe rpu %s.\n",
+ of_node_full_name(nc));
+ return ret;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int zynqmp_r5_remoteproc_remove(struct platform_device *pdev)
+{
+ struct zynqmp_rpu_domain_pdata *local = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MAX_RPROCS; i++) {
+ struct zynqmp_r5_pdata *rpu = &local->rpus[i];
+ struct rproc *rproc;
+
+ rproc = rpu->rproc;
+ if (rproc) {
+ rproc_del(rproc);
+ rproc_free(rproc);
+ rpu->rproc = NULL;
+ }
+ if (rpu->tx_chan) {
+ mbox_free_channel(rpu->tx_chan);
+ rpu->tx_chan = NULL;
+ }
+ if (rpu->rx_chan) {
+ mbox_free_channel(rpu->rx_chan);
+ rpu->rx_chan = NULL;
+ }
+
+ device_unregister(&rpu->dev);
+ }
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
+ { .compatible = "xlnx,zynqmp-r5-remoteproc-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
+
+static struct platform_driver zynqmp_r5_remoteproc_driver = {
+ .probe = zynqmp_r5_remoteproc_probe,
+ .remove = zynqmp_r5_remoteproc_remove,
+ .driver = {
+ .name = "zynqmp_r5_remoteproc",
+ .of_match_table = zynqmp_r5_remoteproc_match,
+ },
+};
+module_platform_driver(zynqmp_r5_remoteproc_driver);
+
+module_param_named(autoboot, autoboot, bool, 0444);
+MODULE_PARM_DESC(autoboot,
+ "enable | disable autoboot. (default: true)");
+module_param_named(allow_sysfs_kick, allow_sysfs_kick, bool, 0444);
+MODULE_PARM_DESC(allow_sysfs_kick,
+ "enable | disable allow kick from sysfs. (default: false)");
+
+MODULE_AUTHOR("Jason Wu <j.wu@xilinx.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ZynqMP R5 remote processor control driver");
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 61456b8f659c..ca47e43dcc1f 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -28,4 +28,3 @@ obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
-
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 00639594de0c..1dd62a59c241 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -44,7 +44,7 @@ struct xlnx_rtc_dev {
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- int calibval;
+ unsigned int calibval;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 3fa162c1fde7..4cce9f315f4e 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -3,6 +3,7 @@ menu "Xilinx SoC drivers"
config XILINX_VCU
tristate "Xilinx VCU logicoreIP Init"
+ select MFD_CORE
depends on HAS_IOMEM
help
Provides the driver to enable and disable the isolation between the
@@ -21,11 +22,15 @@ config ZYNQMP_POWER
bool "Enable Xilinx Zynq MPSoC Power Management driver"
depends on PM && ZYNQMP_FIRMWARE
default y
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
help
Say yes to enable power management support for ZynqMP SoC.
This driver uses firmware driver as an interface for power
management request to firmware. It registers isr to handle
- power management callbacks from firmware.
+ power management callbacks from firmware. It registers a mailbox
+ client to handle power management callbacks from firmware.
+
If in doubt, say N.
config ZYNQMP_PM_DOMAINS
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index f66bfea5de17..12613b20001d 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu_core.o xlnx_vcu_clk.o xlnx_vcu.o
obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
+
+obj-$(CONFIG_ARCH_ZYNQMP) += zynqmp/
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a840c0272135..d89fcbaf9446 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -14,6 +14,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
/* Address map for different registers implemented in the VCU LogiCORE IP. */
#define VCU_ECODER_ENABLE 0x00
#define VCU_DECODER_ENABLE 0x04
@@ -26,14 +28,9 @@
#define VCU_ENC_FPS 0x20
#define VCU_MCU_CLK 0x24
#define VCU_CORE_CLK 0x28
-#define VCU_PLL_BYPASS 0x2c
-#define VCU_ENC_CLK 0x30
#define VCU_PLL_CLK 0x34
#define VCU_ENC_VIDEO_STANDARD 0x38
#define VCU_STATUS 0x3c
-#define VCU_AXI_ENC_CLK 0x40
-#define VCU_AXI_DEC_CLK 0x44
-#define VCU_AXI_MCU_CLK 0x48
#define VCU_DEC_VIDEO_STANDARD 0x4c
#define VCU_DEC_FRAME_SIZE_X 0x50
#define VCU_DEC_FRAME_SIZE_Y 0x54
@@ -41,196 +38,33 @@
#define VCU_BUFFER_B_FRAME 0x5c
#define VCU_WPP_EN 0x60
#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
#define VCU_GASKET_INIT 0x74
#define VCU_GASKET_VALUE 0x03
-/* vcu slcr registers, bitmask and shift */
-#define VCU_PLL_CTRL 0x24
-#define VCU_PLL_CTRL_RESET_MASK 0x01
-#define VCU_PLL_CTRL_RESET_SHIFT 0
-#define VCU_PLL_CTRL_BYPASS_MASK 0x01
-#define VCU_PLL_CTRL_BYPASS_SHIFT 3
-#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
-#define VCU_PLL_CTRL_FBDIV_SHIFT 8
-#define VCU_PLL_CTRL_POR_IN_MASK 0x01
-#define VCU_PLL_CTRL_POR_IN_SHIFT 1
-#define VCU_PLL_CTRL_PWR_POR_MASK 0x01
-#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
-#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
-#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
-#define VCU_PLL_CTRL_DEFAULT 0
-#define VCU_PLL_DIV2 2
-
-#define VCU_PLL_CFG 0x28
-#define VCU_PLL_CFG_RES_MASK 0x0f
-#define VCU_PLL_CFG_RES_SHIFT 0
-#define VCU_PLL_CFG_CP_MASK 0x0f
-#define VCU_PLL_CFG_CP_SHIFT 5
-#define VCU_PLL_CFG_LFHF_MASK 0x03
-#define VCU_PLL_CFG_LFHF_SHIFT 10
-#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
-#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
-#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
-#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
-#define VCU_ENC_CORE_CTRL 0x30
-#define VCU_ENC_MCU_CTRL 0x34
-#define VCU_DEC_CORE_CTRL 0x38
-#define VCU_DEC_MCU_CTRL 0x3c
-#define VCU_PLL_DIVISOR_MASK 0x3f
-#define VCU_PLL_DIVISOR_SHIFT 4
-#define VCU_SRCSEL_MASK 0x01
-#define VCU_SRCSEL_SHIFT 0
-#define VCU_SRCSEL_PLL 1
-
-#define VCU_PLL_STATUS 0x60
-#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
-
#define MHZ 1000000
-#define FVCO_MIN (1500U * MHZ)
-#define FVCO_MAX (3000U * MHZ)
-#define DIVISOR_MIN 0
-#define DIVISOR_MAX 63
#define FRAC 100
-#define LIMIT (10 * MHZ)
/**
- * struct xvcu_device - Xilinx VCU init device structure
+ * struct xvcu_priv - Xilinx VCU private data
* @dev: Platform device
- * @pll_ref: pll ref clock source
- * @aclk: axi clock source
+ * @pll_ref: PLL ref clock source
+ * @core_enc: Core encoder clock
+ * @core_dec: Core decoder clock
+ * @mcu_enc: MCU encoder clock
+ * @mcu_dec: MCU decoder clock
* @logicore_reg_ba: logicore reg base address
* @vcu_slcr_ba: vcu_slcr Register base address
- * @coreclk: core clock frequency
*/
-struct xvcu_device {
+struct xvcu_priv {
struct device *dev;
struct clk *pll_ref;
- struct clk *aclk;
+ struct clk *core_enc;
+ struct clk *core_dec;
+ struct clk *mcu_enc;
+ struct clk *mcu_dec;
void __iomem *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
- u32 coreclk;
-};
-
-/**
- * struct xvcu_pll_cfg - Helper data
- * @fbdiv: The integer portion of the feedback divider to the PLL
- * @cp: PLL charge pump control
- * @res: PLL loop filter resistor control
- * @lfhf: PLL loop filter high frequency capacitor control
- * @lock_dly: Lock circuit configuration settings for lock windowsize
- * @lock_cnt: Lock circuit counter setting
- */
-struct xvcu_pll_cfg {
- u32 fbdiv;
- u32 cp;
- u32 res;
- u32 lfhf;
- u32 lock_dly;
- u32 lock_cnt;
-};
-
-static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
- { 25, 3, 10, 3, 63, 1000 },
- { 26, 3, 10, 3, 63, 1000 },
- { 27, 4, 6, 3, 63, 1000 },
- { 28, 4, 6, 3, 63, 1000 },
- { 29, 4, 6, 3, 63, 1000 },
- { 30, 4, 6, 3, 63, 1000 },
- { 31, 6, 1, 3, 63, 1000 },
- { 32, 6, 1, 3, 63, 1000 },
- { 33, 4, 10, 3, 63, 1000 },
- { 34, 5, 6, 3, 63, 1000 },
- { 35, 5, 6, 3, 63, 1000 },
- { 36, 5, 6, 3, 63, 1000 },
- { 37, 5, 6, 3, 63, 1000 },
- { 38, 5, 6, 3, 63, 975 },
- { 39, 3, 12, 3, 63, 950 },
- { 40, 3, 12, 3, 63, 925 },
- { 41, 3, 12, 3, 63, 900 },
- { 42, 3, 12, 3, 63, 875 },
- { 43, 3, 12, 3, 63, 850 },
- { 44, 3, 12, 3, 63, 850 },
- { 45, 3, 12, 3, 63, 825 },
- { 46, 3, 12, 3, 63, 800 },
- { 47, 3, 12, 3, 63, 775 },
- { 48, 3, 12, 3, 63, 775 },
- { 49, 3, 12, 3, 63, 750 },
- { 50, 3, 12, 3, 63, 750 },
- { 51, 3, 2, 3, 63, 725 },
- { 52, 3, 2, 3, 63, 700 },
- { 53, 3, 2, 3, 63, 700 },
- { 54, 3, 2, 3, 63, 675 },
- { 55, 3, 2, 3, 63, 675 },
- { 56, 3, 2, 3, 63, 650 },
- { 57, 3, 2, 3, 63, 650 },
- { 58, 3, 2, 3, 63, 625 },
- { 59, 3, 2, 3, 63, 625 },
- { 60, 3, 2, 3, 63, 625 },
- { 61, 3, 2, 3, 63, 600 },
- { 62, 3, 2, 3, 63, 600 },
- { 63, 3, 2, 3, 63, 600 },
- { 64, 3, 2, 3, 63, 600 },
- { 65, 3, 2, 3, 63, 600 },
- { 66, 3, 2, 3, 63, 600 },
- { 67, 3, 2, 3, 63, 600 },
- { 68, 3, 2, 3, 63, 600 },
- { 69, 3, 2, 3, 63, 600 },
- { 70, 3, 2, 3, 63, 600 },
- { 71, 3, 2, 3, 63, 600 },
- { 72, 3, 2, 3, 63, 600 },
- { 73, 3, 2, 3, 63, 600 },
- { 74, 3, 2, 3, 63, 600 },
- { 75, 3, 2, 3, 63, 600 },
- { 76, 3, 2, 3, 63, 600 },
- { 77, 3, 2, 3, 63, 600 },
- { 78, 3, 2, 3, 63, 600 },
- { 79, 3, 2, 3, 63, 600 },
- { 80, 3, 2, 3, 63, 600 },
- { 81, 3, 2, 3, 63, 600 },
- { 82, 3, 2, 3, 63, 600 },
- { 83, 4, 2, 3, 63, 600 },
- { 84, 4, 2, 3, 63, 600 },
- { 85, 4, 2, 3, 63, 600 },
- { 86, 4, 2, 3, 63, 600 },
- { 87, 4, 2, 3, 63, 600 },
- { 88, 4, 2, 3, 63, 600 },
- { 89, 4, 2, 3, 63, 600 },
- { 90, 4, 2, 3, 63, 600 },
- { 91, 4, 2, 3, 63, 600 },
- { 92, 4, 2, 3, 63, 600 },
- { 93, 4, 2, 3, 63, 600 },
- { 94, 4, 2, 3, 63, 600 },
- { 95, 4, 2, 3, 63, 600 },
- { 96, 4, 2, 3, 63, 600 },
- { 97, 4, 2, 3, 63, 600 },
- { 98, 4, 2, 3, 63, 600 },
- { 99, 4, 2, 3, 63, 600 },
- { 100, 4, 2, 3, 63, 600 },
- { 101, 4, 2, 3, 63, 600 },
- { 102, 4, 2, 3, 63, 600 },
- { 103, 5, 2, 3, 63, 600 },
- { 104, 5, 2, 3, 63, 600 },
- { 105, 5, 2, 3, 63, 600 },
- { 106, 5, 2, 3, 63, 600 },
- { 107, 3, 4, 3, 63, 600 },
- { 108, 3, 4, 3, 63, 600 },
- { 109, 3, 4, 3, 63, 600 },
- { 110, 3, 4, 3, 63, 600 },
- { 111, 3, 4, 3, 63, 600 },
- { 112, 3, 4, 3, 63, 600 },
- { 113, 3, 4, 3, 63, 600 },
- { 114, 3, 4, 3, 63, 600 },
- { 115, 3, 4, 3, 63, 600 },
- { 116, 3, 4, 3, 63, 600 },
- { 117, 3, 4, 3, 63, 600 },
- { 118, 3, 4, 3, 63, 600 },
- { 119, 3, 4, 3, 63, 600 },
- { 120, 3, 4, 3, 63, 600 },
- { 121, 3, 4, 3, 63, 600 },
- { 122, 3, 4, 3, 63, 600 },
- { 123, 3, 4, 3, 63, 600 },
- { 124, 3, 4, 3, 63, 600 },
- { 125, 3, 4, 3, 63, 600 },
};
/**
@@ -258,47 +92,71 @@ static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
}
/**
- * xvcu_write_field_reg - Write to the vcu reg field
- * @iomem: vcu reg space base address
- * @offset: vcu reg offset from base
- * @field: vcu reg field to write to
- * @mask: vcu reg mask
- * @shift: vcu reg number of bits to shift the bitfield
+ * xvcu_get_color_depth - read the color depth register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the 32-bit value
+ */
+u32 xvcu_get_color_depth(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_ENC_COLOR_DEPTH);
+}
+EXPORT_SYMBOL_GPL(xvcu_get_color_depth);
+
+/**
+ * xvcu_get_memory_depth - read the memory depth register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the 32-bit value
*/
-static void xvcu_write_field_reg(void __iomem *iomem, int offset,
- u32 field, u32 mask, int shift)
+u32 xvcu_get_memory_depth(struct xvcu_device *xvcu)
{
- u32 val = xvcu_read(iomem, offset);
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_MEMORY_DEPTH);
+}
+EXPORT_SYMBOL_GPL(xvcu_get_memory_depth);
- val &= ~(mask << shift);
- val |= (field & mask) << shift;
+/**
+ * xvcu_get_clock_frequency - provide the core clock frequency
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the 32-bit value
+ */
+u32 xvcu_get_clock_frequency(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
+}
+EXPORT_SYMBOL_GPL(xvcu_get_clock_frequency);
- xvcu_write(iomem, offset, val);
+/**
+ * xvcu_get_num_cores - read the number of cores register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: Returns the 32-bit value
+ */
+u32 xvcu_get_num_cores(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_NUM_CORE);
}
+EXPORT_SYMBOL_GPL(xvcu_get_num_cores);
/**
- * xvcu_set_vcu_pll_info - Set the VCU PLL info
+ * xvcu_set_vcu_pll - Set the VCU PLL
* @xvcu: Pointer to the xvcu_device structure
*
* Programming the VCU PLL based on the user configuration
* (ref clock freq, core clock freq, mcu clock freq).
* Core clock frequency has higher priority than mcu clock frequency
- * Errors in following cases
- * - When mcu or clock clock get from logicoreIP is 0
- * - When VCU PLL DIV related bits value other than 1
- * - When proper data not found for given data
- * - When sis570_1 clocksource related operation failed
*
* Return: Returns status, either success or error+reason
*/
-static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
+static int xvcu_set_vcu_pll(struct xvcu_priv *xvcu)
{
u32 refclk, coreclk, mcuclk, inte, deci;
- u32 divisor_mcu, divisor_core, fvco;
- u32 clkoutdiv, vcu_pll_ctrl, pll_clk;
- u32 cfg_val, mod, ctrl;
- int ret, i;
- const struct xvcu_pll_cfg *found = NULL;
+ int ret;
inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
@@ -314,175 +172,74 @@ static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
dev_dbg(xvcu->dev, "Core clock from logicoreIP is %uHz\n", coreclk);
dev_dbg(xvcu->dev, "Mcu clock from logicoreIP is %uHz\n", mcuclk);
- clk_disable_unprepare(xvcu->pll_ref);
ret = clk_set_rate(xvcu->pll_ref, refclk);
if (ret)
- dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate\n");
+ dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate %d\n"
+ , ret);
ret = clk_prepare_enable(xvcu->pll_ref);
if (ret) {
- dev_err(xvcu->dev, "failed to enable pll_ref clock source\n");
+ dev_err(xvcu->dev, "failed to enable pll_ref clock source %d\n",
+ ret);
return ret;
}
- refclk = clk_get_rate(xvcu->pll_ref);
-
- /*
- * The divide-by-2 should be always enabled (==1)
- * to meet the timing in the design.
- * Otherwise, it's an error
- */
- vcu_pll_ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_CTRL);
- clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
- clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
- if (clkoutdiv != 1) {
- dev_err(xvcu->dev, "clkoutdiv value is invalid\n");
- return -EINVAL;
- }
+ ret = clk_set_rate(xvcu->mcu_enc, mcuclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP mcu clk rate %d\n",
+ ret);
- for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
- const struct xvcu_pll_cfg *cfg = &xvcu_pll_cfg[i];
-
- fvco = cfg->fbdiv * refclk;
- if (fvco >= FVCO_MIN && fvco <= FVCO_MAX) {
- pll_clk = fvco / VCU_PLL_DIV2;
- if (fvco % VCU_PLL_DIV2 != 0)
- pll_clk++;
- mod = pll_clk % coreclk;
- if (mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- } else if (coreclk - mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- divisor_core++;
- } else {
- continue;
- }
- if (divisor_core >= DIVISOR_MIN &&
- divisor_core <= DIVISOR_MAX) {
- found = cfg;
- divisor_mcu = pll_clk / mcuclk;
- mod = pll_clk % mcuclk;
- if (mcuclk - mod < LIMIT)
- divisor_mcu++;
- break;
- }
- }
+ ret = clk_prepare_enable(xvcu->mcu_enc);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable mcu_enc %d\n", ret);
+ goto error_mcu_enc;
}
- if (!found) {
- dev_err(xvcu->dev, "Invalid clock combination.\n");
- return -EINVAL;
+ ret = clk_set_rate(xvcu->mcu_dec, mcuclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP mcu clk rate %d\n",
+ ret);
+
+ ret = clk_prepare_enable(xvcu->mcu_dec);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable mcu_dec %d\n", ret);
+ goto error_mcu_dec;
}
- xvcu->coreclk = pll_clk / divisor_core;
- mcuclk = pll_clk / divisor_mcu;
- dev_dbg(xvcu->dev, "Actual Ref clock freq is %uHz\n", refclk);
- dev_dbg(xvcu->dev, "Actual Core clock freq is %uHz\n", xvcu->coreclk);
- dev_dbg(xvcu->dev, "Actual Mcu clock freq is %uHz\n", mcuclk);
-
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
- vcu_pll_ctrl |= (found->fbdiv & VCU_PLL_CTRL_FBDIV_MASK) <<
- VCU_PLL_CTRL_FBDIV_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_POR_IN_MASK <<
- VCU_PLL_CTRL_POR_IN_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_POR_IN_MASK) <<
- VCU_PLL_CTRL_POR_IN_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_PWR_POR_MASK <<
- VCU_PLL_CTRL_PWR_POR_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_PWR_POR_MASK) <<
- VCU_PLL_CTRL_PWR_POR_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, vcu_pll_ctrl);
-
- /* Set divisor for the core and mcu clock */
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL, ctrl);
-
- /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
- cfg_val = (found->res << VCU_PLL_CFG_RES_SHIFT) |
- (found->cp << VCU_PLL_CFG_CP_SHIFT) |
- (found->lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
- (found->lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
- (found->lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CFG, cfg_val);
+ ret = clk_set_rate(xvcu->core_enc, coreclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP core clk rate %d\n",
+ ret);
- return 0;
-}
+ ret = clk_prepare_enable(xvcu->core_enc);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable core_enc %d\n", ret);
+ goto error_core_enc;
+ }
-/**
- * xvcu_set_pll - PLL init sequence
- * @xvcu: Pointer to the xvcu_device structure
- *
- * Call the api to set the PLL info and once that is done then
- * init the PLL sequence to make the PLL stable.
- *
- * Return: Returns status, either success or error+reason
- */
-static int xvcu_set_pll(struct xvcu_device *xvcu)
-{
- u32 lock_status;
- unsigned long timeout;
- int ret;
+ ret = clk_set_rate(xvcu->core_dec, coreclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP core clk rate %d\n",
+ ret);
- ret = xvcu_set_vcu_pll_info(xvcu);
+ ret = clk_prepare_enable(xvcu->core_dec);
if (ret) {
- dev_err(xvcu->dev, "failed to set pll info\n");
- return ret;
+ dev_err(xvcu->dev, "failed to enable core_dec %d\n", ret);
+ goto error_core_dec;
}
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- /*
- * Defined the timeout for the max time to wait the
- * PLL_STATUS to be locked.
- */
- timeout = jiffies + msecs_to_jiffies(2000);
- do {
- lock_status = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_STATUS);
- if (lock_status & VCU_PLL_STATUS_LOCK_STATUS_MASK) {
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- return 0;
- }
- } while (!time_after(jiffies, timeout));
-
- /* PLL is not locked even after the timeout of the 2sec */
- dev_err(xvcu->dev, "PLL is not locked\n");
- return -ETIMEDOUT;
+ return 0;
+
+error_core_dec:
+ clk_disable_unprepare(xvcu->core_enc);
+error_core_enc:
+ clk_disable_unprepare(xvcu->mcu_dec);
+error_mcu_dec:
+ clk_disable_unprepare(xvcu->mcu_enc);
+error_mcu_enc:
+ clk_disable_unprepare(xvcu->pll_ref);
+
+ return ret;
}
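The refclk handed to clk_set_rate() is decoded from the logicore VCU_PLL_CLK/VCU_PLL_CLK_DEC pair; with FRAC = 100 the decimal register carries hundredths of a MHz. A standalone check of that decode, assuming the usual inte * MHZ + deci * (MHZ / FRAC) combination (the combination itself sits outside this hunk):

#include <assert.h>
#include <stdint.h>

#define MHZ  1000000
#define FRAC 100

/* Decode the logicore PLL_CLK / PLL_CLK_DEC register pair into Hz.
 * Assumed combination: integer MHz plus hundredths of a MHz.
 */
static uint32_t decode_refclk(uint32_t inte, uint32_t deci)
{
	return inte * MHZ + deci * (MHZ / FRAC);
}

int main(void)
{
	assert(decode_refclk(33, 33) == 33330000);	/* 33.33 MHz */
	assert(decode_refclk(27, 0) == 27000000);	/* 27 MHz */
	return 0;
}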
/**
@@ -496,8 +253,8 @@ static int xvcu_set_pll(struct xvcu_device *xvcu)
*/
static int xvcu_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct xvcu_device *xvcu;
+ struct xvcu_priv *xvcu;
+ struct xvcu_device *xvcu_core = dev_get_drvdata(pdev->dev.parent);
int ret;
xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
@@ -505,85 +262,61 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENOMEM;
xvcu->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
- if (!res) {
- dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
- return -ENODEV;
- }
+ xvcu->vcu_slcr_ba = xvcu_core->vcu_slcr_ba;
+ xvcu->logicore_reg_ba = xvcu_core->logicore_reg_ba;
- xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->vcu_slcr_ba) {
- dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
- return -ENOMEM;
+ xvcu->pll_ref = devm_clk_get(pdev->dev.parent, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
+ xvcu->core_enc = devm_clk_get(pdev->dev.parent, "vcu_core_enc");
+ if (IS_ERR(xvcu->core_enc)) {
+ dev_err(&pdev->dev, "Could not get core_enc clock\n");
+ return PTR_ERR(xvcu->core_enc);
}
- xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->logicore_reg_ba) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
+ xvcu->core_dec = devm_clk_get(pdev->dev.parent, "vcu_core_dec");
+ if (IS_ERR(xvcu->core_dec)) {
+ dev_err(&pdev->dev, "Could not get vcu_core_dec clock\n");
+ return PTR_ERR(xvcu->core_dec);
}
- xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(xvcu->aclk)) {
- dev_err(&pdev->dev, "Could not get aclk clock\n");
- return PTR_ERR(xvcu->aclk);
+ xvcu->mcu_enc = devm_clk_get(pdev->dev.parent, "vcu_mcu_enc");
+ if (IS_ERR(xvcu->mcu_enc)) {
+ dev_err(&pdev->dev, "Could not get mcu_enc clock\n");
+ return PTR_ERR(xvcu->mcu_enc);
}
- xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
- if (IS_ERR(xvcu->pll_ref)) {
- dev_err(&pdev->dev, "Could not get pll_ref clock\n");
- return PTR_ERR(xvcu->pll_ref);
+ xvcu->mcu_dec = devm_clk_get(pdev->dev.parent, "vcu_mcu_dec");
+ if (IS_ERR(xvcu->mcu_dec)) {
+ dev_err(&pdev->dev, "Could not get mcu_dec clock\n");
+ return PTR_ERR(xvcu->mcu_dec);
}
- ret = clk_prepare_enable(xvcu->aclk);
+ /* Do the PLL settings based on the ref clk, core and mcu clk freq */
+ ret = xvcu_set_vcu_pll(xvcu);
if (ret) {
- dev_err(&pdev->dev, "aclk clock enable failed\n");
+ dev_err(&pdev->dev, "Failed to set the pll\n");
return ret;
}
- ret = clk_prepare_enable(xvcu->pll_ref);
- if (ret) {
- dev_err(&pdev->dev, "pll_ref clock enable failed\n");
- goto error_aclk;
- }
-
- /*
- * Do the Gasket isolation and put the VCU out of reset
- * Bit 0 : Gasket isolation
- * Bit 1 : put VCU out of reset
- */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+ dev_set_drvdata(&pdev->dev, xvcu);
- /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
- ret = xvcu_set_pll(xvcu);
+ ret = devm_of_platform_populate(pdev->dev.parent);
if (ret) {
- dev_err(&pdev->dev, "Failed to set the pll\n");
- goto error_pll_ref;
+ dev_err(&pdev->dev, "Failed to register allegro codecs\n");
+ return ret;
}
- dev_set_drvdata(&pdev->dev, xvcu);
-
dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
- return 0;
-
-error_pll_ref:
- clk_disable_unprepare(xvcu->pll_ref);
-error_aclk:
- clk_disable_unprepare(xvcu->aclk);
return ret;
}
/**
- * xvcu_remove - Insert gasket isolation
+ * xvcu_remove - Depopulate the child nodes, insert gasket isolation
* and disable the clock
* @pdev: Pointer to the platform_device structure
*
@@ -592,32 +325,33 @@ error_aclk:
*/
static int xvcu_remove(struct platform_device *pdev)
{
- struct xvcu_device *xvcu;
+ struct xvcu_priv *xvcu;
xvcu = platform_get_drvdata(pdev);
if (!xvcu)
return -ENODEV;
- /* Add the the Gasket isolation and put the VCU in reset. */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+ clk_disable_unprepare(xvcu->core_enc);
+ devm_clk_put(pdev->dev.parent, xvcu->core_enc);
+
+ clk_disable_unprepare(xvcu->core_dec);
+ devm_clk_put(pdev->dev.parent, xvcu->core_dec);
+
+ clk_disable_unprepare(xvcu->mcu_enc);
+ devm_clk_put(pdev->dev.parent, xvcu->mcu_enc);
+
+ clk_disable_unprepare(xvcu->mcu_dec);
+ devm_clk_put(pdev->dev.parent, xvcu->mcu_dec);
clk_disable_unprepare(xvcu->pll_ref);
- clk_disable_unprepare(xvcu->aclk);
+ devm_clk_put(pdev->dev.parent, xvcu->pll_ref);
return 0;
}
-static const struct of_device_id xvcu_of_id_table[] = {
- { .compatible = "xlnx,vcu" },
- { .compatible = "xlnx,vcu-logicoreip-1.0" },
- { }
-};
-MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
-
static struct platform_driver xvcu_driver = {
.driver = {
.name = "xilinx-vcu",
- .of_match_table = xvcu_of_id_table,
},
.probe = xvcu_probe,
.remove = xvcu_remove,
@@ -628,3 +362,4 @@ module_platform_driver(xvcu_driver);
MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
MODULE_DESCRIPTION("Xilinx VCU init Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:xilinx-vcu");
diff --git a/drivers/soc/xilinx/xlnx_vcu_clk.c b/drivers/soc/xilinx/xlnx_vcu_clk.c
new file mode 100644
index 000000000000..5263f0eac4c3
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu_clk.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU clock driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ * Tejas Patel <tejas.patel@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
+/* vcu slcr registers, bitmask and shift */
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET_MASK BIT(0)
+#define VCU_PLL_CTRL_RESET_SHIFT 0
+#define VCU_PLL_CTRL_BYPASS_MASK BIT(3)
+#define VCU_PLL_CTRL_BYPASS_SHIFT 3
+#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
+#define VCU_PLL_CTRL_FBDIV_SHIFT 8
+#define VCU_PLL_CTRL_POR_IN_MASK BIT(1)
+#define VCU_PLL_CTRL_POR_IN_SHIFT 1
+#define VCU_PLL_CTRL_PWR_POR_MASK BIT(2)
+#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
+#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
+#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
+#define VCU_PLL_CTRL_DEFAULT 0
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES_MASK 0x0f
+#define VCU_PLL_CFG_RES_SHIFT 0
+#define VCU_PLL_CFG_CP_MASK 0x0f
+#define VCU_PLL_CFG_CP_SHIFT 5
+#define VCU_PLL_CFG_LFHF_MASK 0x03
+#define VCU_PLL_CFG_LFHF_SHIFT 10
+#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
+#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
+#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
+#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_ENC_MCU_CTRL_GATE_BIT BIT(12)
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_DIVISOR_MASK 0x3f
+#define VCU_PLL_DIVISOR_SHIFT 4
+#define VCU_SRCSEL_MASK 0x01
+#define VCU_SRCSEL_SHIFT 0
+#define VCU_SRCSEL_PLL 1
+
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
+#define VCU_PLL_LOCK_TIMEOUT 2000000
+
+#define PLL_FBDIV_MIN 25
+#define PLL_FBDIV_MAX 125
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+#define DIVISOR_MIN 0
+#define DIVISOR_MAX 63
+#define FRAC 100
+#define LIMIT (10 * MHZ)
+
+#define FRAC_OFFSET 0x8
+#define PLLFCFG_FRAC_EN BIT(31)
+#define FRAC_DIV 0x10000 /* 2^16 */
+
+#define to_vcu_pll(_hw) container_of(_hw, struct vcu_pll, hw)
+#define div_mask(width) ((1 << (width)) - 1)
+
+enum pll_mode {
+ PLL_MODE_INT,
+ PLL_MODE_FRAC,
+};
+
+enum vcu_clks {
+ vcu_pll_half, vcu_core_enc, vcu_core_dec,
+ mcu_core_enc, mcu_core_dec, clk_max
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly: Lock circuit configuration settings for lock window size
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+/**
+ * struct vcu_pll - VCU PLL control/status data
+ * @hw: Clock hardware
+ * @pll_ctrl: PLL control register address
+ * @pll_status: PLL status register address
+ * @pll_cfg: PLL config register address
+ * @lockbit: PLL lock status bit
+ */
+struct vcu_pll {
+ struct clk_hw hw;
+ void __iomem *pll_ctrl;
+ void __iomem *pll_status;
+ void __iomem *pll_cfg;
+ u8 lockbit;
+};
+
+static struct clk_hw_onecell_data *vcu_clk_data;
+static const char * const vcu_mux_parents[] = {
+ "dummy_name",
+ "vcu_pll_half"
+};
+
+static DEFINE_SPINLOCK(mcu_enc_lock);
+static DEFINE_SPINLOCK(mcu_dec_lock);
+static DEFINE_SPINLOCK(core_enc_lock);
+static DEFINE_SPINLOCK(core_dec_lock);
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+static int xvcu_divider_get_val(unsigned long rate, unsigned long parent_rate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags)
+{
+ unsigned int div;
+
+ if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+ div = DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate);
+ else
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+
+ return min_t(unsigned int, div, div_mask(width));
+}
+
+static unsigned long xvcu_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long xvcu_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int bestdiv;
+
+ bestdiv = xvcu_divider_get_val(rate, *prate, divider->table,
+ divider->width, divider->flags);
+
+ *prate = rate * bestdiv;
+
+ return rate;
+}
+
+static int xvcu_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int value;
+ u32 val;
+
+ value = xvcu_divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ val = readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ return 0;
+}
+
+static const struct clk_ops xvcu_divider_ops = {
+ .recalc_rate = xvcu_divider_recalc_rate,
+ .round_rate = xvcu_divider_round_rate,
+ .set_rate = xvcu_divider_set_rate,
+};
+
+/**
+ * xvcu_register_divider - Register custom divider hardware
+ * @dev: VCU clock device
+ * @name: Divider name
+ * @parent_name: Divider parent name
+ * @flags: Clock flags
+ * @reg: Divider register base address
+ * @shift: Divider bits shift
+ * @width: Divider bits width
+ * @clk_divider_flags: Divider specific flags
+ * @lock: Shared register lock
+ *
+ * Register custom divider hardware to CCF.
+ *
+ * Return: Clock hardware for generated clock
+ */
+static struct clk_hw *xvcu_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg, u8 shift,
+ u8 width, u8 clk_divider_flags,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &xvcu_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+/**
+ * xvcu_pll_bypass_ctrl - Enable/Disable PLL bypass mode
+ * @pll: PLL data
+ * @enable: Enable/Disable flag
+ *
+ * Enable/Disable PLL bypass mode:
+ * 0 - Disable
+ * 1 - Enable
+ */
+static void xvcu_pll_bypass_ctrl(struct vcu_pll *pll, bool enable)
+{
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+ if (enable)
+ reg |= VCU_PLL_CTRL_BYPASS_MASK;
+ else
+ reg &= ~VCU_PLL_CTRL_BYPASS_MASK;
+ writel(reg, pll->pll_ctrl);
+}
+
+/**
+ * xvcu_pll_config - Configure PLL based on FBDIV value
+ * @pll: PLL data
+ *
+ * PLL needs to be configured before taking out of reset. Configuration
+ * data depends on the value of FBDIV for proper PLL locking.
+ */
+static void xvcu_pll_config(struct vcu_pll *pll)
+{
+ unsigned int fbdiv, reg;
+ int i;
+
+ reg = readl(pll->pll_ctrl);
+ fbdiv = (reg >> VCU_PLL_CTRL_FBDIV_SHIFT) & VCU_PLL_CTRL_FBDIV_MASK;
+
+ for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
+ if (fbdiv != xvcu_pll_cfg[i].fbdiv)
+ continue;
+
+ /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
+ reg = (xvcu_pll_cfg[i].res << VCU_PLL_CFG_RES_SHIFT) |
+ (xvcu_pll_cfg[i].cp << VCU_PLL_CFG_CP_SHIFT) |
+ (xvcu_pll_cfg[i].lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
+ (xvcu_pll_cfg[i].lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
+ (xvcu_pll_cfg[i].lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
+ writel(reg, pll->pll_cfg);
+ }
+}
+
+/**
+ * xvcu_pll_enable_disable - Enable/Disable PLL
+ * @pll: PLL data
+ * @enable: Enable/Disable flag
+ *
+ * Enable/Disable PLL based on request:
+ * 0 - Disable
+ * 1 - Enable
+ */
+static void xvcu_pll_enable_disable(struct vcu_pll *pll, bool enable)
+{
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+ if (enable)
+ reg &= ~(VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK);
+ else
+ reg |= (VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK);
+ writel(reg, pll->pll_ctrl);
+}
+
+/**
+ * xvcu_pll_is_enabled - Check if PLL is enabled or not
+ * @hw: Clock hardware
+ *
+ * Check if PLL is enabled or not. PLL enabled means PLL is not in
+ * reset state.
+ *
+ * Return: PLL status (0 - Disabled, 1 - Enabled)
+ */
+static int xvcu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+
+ return !(reg & (VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK));
+}
+
+/**
+ * xvcu_pll_enable - Enable PLL
+ * @hw: Clock hardware
+ *
+ * Enable PLL if it is not enabled. Configure PLL, enable and wait for
+ * the PLL lock. Put PLL into bypass state during PLL configuration.
+ *
+ * Return: 0 on success else error code
+ */
+static int xvcu_pll_enable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 reg;
+ int ret;
+
+ if (xvcu_pll_is_enabled(hw))
+ return 0;
+
+ pr_info("VCU PLL: enable\n");
+
+ xvcu_pll_bypass_ctrl(pll, 1);
+
+ xvcu_pll_config(pll);
+
+ xvcu_pll_enable_disable(pll, 1);
+
+ ret = readl_poll_timeout_atomic(pll->pll_status, reg,
+ reg & VCU_PLL_STATUS_LOCK_STATUS_MASK,
+ 1, VCU_PLL_LOCK_TIMEOUT);
+ if (ret) {
+ pr_err("VCU PLL is not locked\n");
+ return ret;
+ }
+
+ xvcu_pll_bypass_ctrl(pll, 0);
+
+ return ret;
+}
+
+/**
+ * xvcu_pll_disable - Disable PLL
+ * @hw: Clock hardware
+ *
+ * Disable PLL if it is enabled.
+ */
+static void xvcu_pll_disable(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+
+ if (!xvcu_pll_is_enabled(hw))
+ return;
+
+ pr_info("PLL: shutdown\n");
+ xvcu_pll_enable_disable(pll, 0);
+}
+
+/**
+ * xvcu_pll_frac_get_mode - Get PLL fraction mode
+ * @hw: Clock hardware
+ *
+ * Check if PLL is configured for integer mode or fraction mode.
+ *
+ * Return: PLL mode:
+ * PLL_MODE_FRAC - Fraction mode
+ * PLL_MODE_INT - Integer mode
+ */
+static inline enum pll_mode xvcu_pll_frac_get_mode(struct clk_hw *hw)
+{
+ struct vcu_pll *clk = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(clk->pll_ctrl + FRAC_OFFSET);
+
+ reg = reg & PLLFCFG_FRAC_EN;
+ return reg ? PLL_MODE_FRAC : PLL_MODE_INT;
+}
+
+/**
+ * xvcu_pll_frac_set_mode - Set PLL fraction mode
+ * @hw: Clock hardware
+ * @on: Enable/Disable flag
+ *
+ * Configure PLL for integer mode or fraction mode.
+ * 1 - Fraction mode
+ * 0 - Integer mode
+ */
+static inline void xvcu_pll_frac_set_mode(struct clk_hw *hw, bool on)
+{
+ struct vcu_pll *clk = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(clk->pll_ctrl + FRAC_OFFSET);
+ /* Integer mode needs the enable bit cleared, not left set */
+ if (on)
+ reg |= PLLFCFG_FRAC_EN;
+ else
+ reg &= ~PLLFCFG_FRAC_EN;
+ writel(reg, clk->pll_ctrl + FRAC_OFFSET);
+}
+
+static long vcu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 fbdiv;
+ long rate_div, f;
+
+ /* Enable the fractional mode if needed */
+ rate_div = (rate * FRAC_DIV) / *prate;
+ f = rate_div % FRAC_DIV;
+ xvcu_pll_frac_set_mode(hw, !!f);
+
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ if (rate > FVCO_MAX) {
+ fbdiv = rate / FVCO_MAX;
+ rate = rate / (fbdiv + 1);
+ }
+ if (rate < FVCO_MIN) {
+ fbdiv = DIV_ROUND_UP(FVCO_MIN, rate);
+ rate = rate * fbdiv;
+ }
+ return rate;
+ }
+
+ fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ return *prate * fbdiv;
+}
+
+static unsigned long vcu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 fbdiv, data, reg;
+ unsigned long rate, frac;
+
+ reg = readl(pll->pll_ctrl);
+ fbdiv = (reg >> VCU_PLL_CTRL_FBDIV_SHIFT) & VCU_PLL_CTRL_FBDIV_MASK;
+
+ rate = parent_rate * fbdiv;
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ data = (readl(pll->pll_ctrl + FRAC_OFFSET) & 0xFFFF);
+ frac = (parent_rate * data) / FRAC_DIV;
+ rate = rate + frac;
+ }
+
+ return rate;
+}
+
+static int vcu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 fbdiv, reg;
+ long rate_div, frac, m, f;
+
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ rate_div = ((rate * FRAC_DIV) / parent_rate);
+ m = rate_div / FRAC_DIV;
+ f = rate_div % FRAC_DIV;
+ m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
+ rate = parent_rate * m;
+ frac = (parent_rate * f) / FRAC_DIV;
+ reg = readl(pll->pll_ctrl);
+ reg &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
+ reg |= m << VCU_PLL_CTRL_FBDIV_SHIFT;
+ writel(reg, pll->pll_ctrl);
+
+ reg = readl(pll->pll_ctrl + FRAC_OFFSET);
+ reg &= ~0xFFFF;
+ reg |= (f & 0xFFFF);
+ writel(reg, pll->pll_ctrl + FRAC_OFFSET);
+
+ return (rate + frac);
+ }
+
+ fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ reg = readl(pll->pll_ctrl);
+ reg &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
+ reg |= fbdiv << VCU_PLL_CTRL_FBDIV_SHIFT;
+ writel(reg, pll->pll_ctrl);
+
+ return parent_rate * fbdiv;
+}
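+
+/*
+ * Worked example of the fractional path above (illustrative numbers,
+ * not taken from a real design): with parent_rate = 25 MHz and a
+ * requested rate of 2662.5 MHz, rate_div = 2662500000 * 65536 /
+ * 25000000 = 6979584, so m = 6979584 / 65536 = 106 and f = 6979584 %
+ * 65536 = 32768. FBDIV is programmed to 106, the fractional field to
+ * 32768, and the result is 106 * 25 MHz + (25 MHz * 32768 / 65536) =
+ * 2662.5 MHz exactly.
+ */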
+
+static const struct clk_ops vcu_pll_ops = {
+ .enable = xvcu_pll_enable,
+ .disable = xvcu_pll_disable,
+ .is_enabled = xvcu_pll_is_enabled,
+ .round_rate = vcu_pll_round_rate,
+ .recalc_rate = vcu_pll_recalc_rate,
+ .set_rate = vcu_pll_set_rate,
+};
+
+/**
+ * xvcu_register_pll - Register VCU PLL
+ * @dev: VCU clock device
+ * @name: PLL name
+ * @parent: PLL parent
+ * @reg_base: PLL register base address
+ * @flags: Hardware specific flags
+ *
+ * Register PLL to CCF.
+ *
+ * Return: Clock hardware for generated clock
+ */
+static struct clk_hw *xvcu_register_pll(struct device *dev, const char *name,
+ const char *parent,
+ void __iomem *reg_base,
+ unsigned long flags)
+{
+ struct vcu_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ init.name = name;
+ init.parent_names = &parent;
+ init.ops = &vcu_pll_ops;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll = devm_kmalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->hw.init = &init;
+ pll->pll_ctrl = reg_base + VCU_PLL_CTRL;
+ pll->pll_status = reg_base + VCU_PLL_STATUS;
+ pll->pll_cfg = reg_base + VCU_PLL_CFG;
+ pll->lockbit = VCU_PLL_STATUS_LOCK_STATUS_MASK;
+
+ hw = &pll->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = clk_hw_set_rate_range(hw, FVCO_MIN, FVCO_MAX);
+ if (ret < 0)
+ pr_err("%s: ERROR clk_hw_set_rate_range failed %d\n", name, ret);
+
+ return hw;
+}
+
+/**
+ * register_vcu_leaf_clocks - Register VCU leaf clocks
+ * @dev: VCU clock device
+ * @name: Clock name
+ * @parents: Clock parents
+ * @nparents: Clock parent count
+ * @default_parent: Default parent to set
+ * @reg: Clock control register address
+ * @lock: Clock register access lock
+ *
+ * Register VCU leaf clocks. These clocks are MCU/core
+ * encoder and decoder clocks. Topology for these clocks
+ * are Mux, Divisor and Gate.
+ *
+ * Return: Clock hardware for the generated gate clock
+ */
+static struct clk_hw *register_vcu_leaf_clocks(struct device *dev,
+ const char *name,
+ const char * const *parents,
+ u8 nparents,
+ struct clk *default_parent,
+ void __iomem *reg,
+ spinlock_t *lock)
+{
+ char *clk_mux, *clk_div;
+ struct clk_hw *hw;
+
+ clk_mux = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_mux");
+ hw = clk_hw_register_mux(dev, clk_mux, parents, nparents,
+ CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ reg, VCU_SRCSEL_SHIFT, 1, 0, lock);
+
+ if (default_parent)
+ clk_set_parent(hw->clk, default_parent);
+
+ clk_div = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_div");
+ xvcu_register_divider(dev, clk_div, clk_mux,
+ CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ reg, VCU_PLL_DIVISOR_SHIFT, 6,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST,
+ lock);
+
+ return clk_hw_register_gate(dev, name, clk_div,
+ CLK_SET_RATE_PARENT,
+ reg, 12, 0, lock);
+}
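+
+/*
+ * The resulting per-leaf topology, e.g. for "vcu_core_enc_clk", is:
+ *
+ *   vcu_core_enc_clk_mux -> vcu_core_enc_clk_div -> vcu_core_enc_clk (gate)
+ *
+ * with the 1-bit mux selecting between the placeholder external input
+ * ("dummy_name") and vcu_pll_half, the 6-bit divider at bit 4 scaling
+ * it, and bit 12 of the control register gating the output. Consumers
+ * only ever see the gate clock.
+ */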
+
+/**
+ * unregister_vcu_leaf_clocks - Unregister VCU leaf clocks
+ * @hw: VCU leaf clock hardware
+ *
+ * Unregister VCU leaf clocks. These clocks are MCU/core
+ * encoder and decoder clocks. Unregister clocks in order
+ * from gate, div and mux maintaining their parent dependency.
+ *
+ */
+static void unregister_vcu_leaf_clocks(struct clk_hw *hw)
+{
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent(hw);
+ clk_hw_unregister_gate(hw);
+ hw = parent;
+
+ parent = clk_hw_get_parent(hw);
+ clk_hw_unregister_divider(hw);
+ hw = parent;
+
+ clk_hw_unregister_mux(hw);
+}
+
+/**
+ * xvcu_clock_init - Initialize VCU clocks
+ * @dev: VCU clock device
+ * @reg_base: Clock register base address
+ *
+ * Register VCU PLL and clocks and add VCU to clock provider list.
+ *
+ * Return: 0 on success else error code.
+ */
+static int xvcu_clock_init(struct device *dev, void __iomem *reg_base)
+{
+ struct clk_hw *hw;
+ struct clk *ref_clk;
+ const char *parent;
+ u32 vcu_pll_ctrl, clkoutdiv;
+ int i;
+
+ ref_clk = devm_clk_get(dev, "pll_ref");
+ if (IS_ERR(ref_clk)) {
+ dev_err(dev, "failed to get pll_ref clock\n");
+ return PTR_ERR(ref_clk);
+ }
+
+ vcu_clk_data = devm_kzalloc(dev, sizeof(*vcu_clk_data) +
+ sizeof(*vcu_clk_data->hws) * clk_max,
+ GFP_KERNEL);
+ if (!vcu_clk_data)
+ return -ENOMEM;
+
+ parent = __clk_get_name(ref_clk);
+ hw = xvcu_register_pll(dev, "vcu_pll", parent, reg_base,
+ CLK_SET_RATE_NO_REPARENT);
+ if (IS_ERR(hw)) {
+ dev_err(dev, "VCU PLL registration failed\n");
+ return PTR_ERR(hw);
+ }
+
+ /*
+ * The divide-by-2 must always be enabled (== 1) to meet timing
+ * closure in the design; anything else is an error.
+ */
+ vcu_pll_ctrl = readl(reg_base + VCU_PLL_CTRL);
+ clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
+ clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
+ if (clkoutdiv != 1) {
+ dev_err(dev, "clkoutdiv is invalid\n");
+ return -EINVAL;
+ }
+
+ vcu_clk_data->hws[vcu_pll_half] =
+ clk_hw_register_fixed_factor(dev, "vcu_pll_half", "vcu_pll",
+ CLK_SET_RATE_NO_REPARENT |
+ CLK_SET_RATE_PARENT,
+ 1, 2);
+
+ vcu_clk_data->hws[vcu_core_enc] =
+ register_vcu_leaf_clocks(dev, "vcu_core_enc_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_ENC_CORE_CTRL,
+ &core_enc_lock);
+ vcu_clk_data->hws[vcu_core_dec] =
+ register_vcu_leaf_clocks(dev, "vcu_core_dec_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_DEC_CORE_CTRL,
+ &core_dec_lock);
+ vcu_clk_data->hws[mcu_core_enc] =
+ register_vcu_leaf_clocks(dev, "mcu_core_enc_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_ENC_MCU_CTRL,
+ &mcu_enc_lock);
+ vcu_clk_data->hws[mcu_core_dec] =
+ register_vcu_leaf_clocks(dev, "mcu_core_dec_clk",
+ vcu_mux_parents, 2,
+ vcu_clk_data->hws[vcu_pll_half]->clk,
+ reg_base + VCU_DEC_MCU_CTRL,
+ &mcu_dec_lock);
+
+ for (i = 0; i < clk_max; i++) {
+ if (IS_ERR(vcu_clk_data->hws[i])) {
+ dev_err(dev, "clk %d: register failed with %ld\n",
+ i, PTR_ERR(vcu_clk_data->hws[i]));
+ }
+ }
+
+ vcu_clk_data->num = clk_max;
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ vcu_clk_data);
+}
+
+static int xvcu_clk_probe(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ ret = xvcu_clock_init(pdev->dev.parent, xvcu->vcu_slcr_ba);
+ if (ret)
+ dev_err(&pdev->dev, "clock init fail with error %d\n", ret);
+ else
+ dev_dbg(&pdev->dev, "clock init successful\n");
+
+ return ret;
+}
+
+static int xvcu_clk_remove(struct platform_device *pdev)
+{
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[vcu_core_enc]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[vcu_core_dec]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[mcu_core_enc]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[mcu_core_dec]);
+ clk_hw_unregister(vcu_clk_data->hws[vcu_pll_half]);
+ of_clk_del_provider(pdev->dev.parent->of_node);
+
+ devm_kfree(pdev->dev.parent, vcu_clk_data);
+
+ return 0;
+}
+
+static struct platform_driver xvcu_clk_driver = {
+ .driver = {
+ .name = "xilinx-vcu-clk",
+ },
+ .probe = xvcu_clk_probe,
+ .remove = xvcu_clk_remove,
+};
+
+module_platform_driver(xvcu_clk_driver);
+
+MODULE_AUTHOR("Rajan Vaja <rajan.vaja@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:xilinx-vcu-clk");
diff --git a/drivers/soc/xilinx/xlnx_vcu_core.c b/drivers/soc/xilinx/xlnx_vcu_core.c
new file mode 100644
index 000000000000..0415b283c133
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu_core.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU core driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
+static const struct mfd_cell xvcu_devs[] = {
+ {
+ .name = "xilinx-vcu-clk",
+ },
+ {
+ .name = "xilinx-vcu",
+ },
+};
+
+static int xvcu_core_probe(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+ struct resource *res;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->logicore_reg_ba) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ /*
+ * Do the Gasket isolation and put the VCU out of reset
+ * Bit 0 : Gasket isolation
+ * Bit 1 : put VCU out of reset
+ */
+ xvcu->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvcu->reset_gpio)) {
+ ret = PTR_ERR(xvcu->reset_gpio);
+ dev_err(&pdev->dev, "failed to get reset gpio for vcu.\n");
+ return ret;
+ }
+
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycles of vcu pll_ref; slowest freq is 33.33 kHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ } else {
+ dev_warn(&pdev->dev, "No reset gpio info from dts for vcu. This may lead to incorrect functionality if VCU isolation is removed post initialization.\n");
+ }
+
+ iowrite32(VCU_GASKET_VALUE, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, xvcu_devs,
+ ARRAY_SIZE(xvcu_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ goto err_mfd_add_devices;
+ }
+
+ dev_dbg(&pdev->dev, "Successfully added MFD devices\n");
+
+ return 0;
+
+err_mfd_add_devices:
+ /* Add the Gasket isolation and put the VCU in reset. */
+ iowrite32(0, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return ret;
+}
+
+static int xvcu_core_remove(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+
+ xvcu = platform_get_drvdata(pdev);
+ if (!xvcu)
+ return -ENODEV;
+
+ mfd_remove_devices(&pdev->dev);
+
+ /* Add the Gasket isolation and put the VCU in reset. */
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycles of vcu pll_ref; slowest freq is 33.33 kHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ }
+ iowrite32(0, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return 0;
+}
+
+static const struct of_device_id xvcu_core_of_id_table[] = {
+ { .compatible = "xlnx,vcu" },
+ { .compatible = "xlnx,vcu-logicoreip-1.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvcu_core_of_id_table);
+
+static struct platform_driver xvcu_core_driver = {
+ .driver = {
+ .name = "xilinx-vcu-core",
+ .of_match_table = xvcu_core_of_id_table,
+ },
+ .probe = xvcu_core_probe,
+ .remove = xvcu_core_remove,
+};
+
+module_platform_driver(xvcu_core_driver);
+
+MODULE_AUTHOR("Rajan Vaja <rajan.vaja@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx VCU core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/xilinx/zynqmp/Makefile b/drivers/soc/xilinx/zynqmp/Makefile
new file mode 100644
index 000000000000..29d19cb442bc
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp/Makefile
@@ -0,0 +1 @@
+obj-y += tap_delays.o
diff --git a/drivers/soc/xilinx/zynqmp/tap_delays.c b/drivers/soc/xilinx/zynqmp/tap_delays.c
new file mode 100644
index 000000000000..cf1836ea99c3
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp/tap_delays.c
@@ -0,0 +1,69 @@
+/*
+ * Xilinx Zynq MPSoC Tap Delay Programming
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/soc/xilinx/zynqmp/tap_delays.h>
+
+/**
+ * arasan_zynqmp_set_tap_delay - Program the tap delays.
+ * @deviceid: Unique Id of device
+ * @itap_delay: Input Tap Delay
+ * @otap_delay: Output Tap Delay
+ */
+void arasan_zynqmp_set_tap_delay(u8 deviceid, u8 itap_delay, u8 otap_delay)
+{
+ u32 node_id = (deviceid == 0) ? NODE_SD_0 : NODE_SD_1;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->ioctl)
+ return;
+
+ /* Set the Input Tap Delay */
+ if (itap_delay)
+ eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_INPUT, itap_delay, NULL);
+
+ /* Set the Output Tap Delay */
+ if (otap_delay)
+ eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_OUTPUT, otap_delay, NULL);
+}
+EXPORT_SYMBOL_GPL(arasan_zynqmp_set_tap_delay);
+
+/**
+ * zynqmp_dll_reset - Issue the DLL reset.
+ * @deviceid: Unique Id of device
+ */
+void zynqmp_dll_reset(u8 deviceid)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->ioctl)
+ return;
+
+ /* Issue DLL Reset */
+ if (deviceid == 0)
+ eemi_ops->ioctl(NODE_SD_0, IOCTL_SD_DLL_RESET,
+ PM_DLL_RESET_PULSE, 0, NULL);
+ else
+ eemi_ops->ioctl(NODE_SD_1, IOCTL_SD_DLL_RESET,
+ PM_DLL_RESET_PULSE, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_dll_reset);
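arasan_zynqmp_set_tap_delay() and zynqmp_dll_reset() are exported for SD host drivers; a hypothetical tuning hook (names assumed, not from this tree) might use them as:

    #include <linux/soc/xilinx/zynqmp/tap_delays.h>

    /* Hypothetical caller: program new taps for SD0, then pulse the
     * DLL so the delay line latches them.
     */
    static void example_apply_sd_taps(u8 itap, u8 otap)
    {
            arasan_zynqmp_set_tap_delay(0, itap, otap);
            zynqmp_dll_reset(0);
    }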
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 1b9d14411a15..01a2e9dc1fe5 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -9,13 +9,31 @@
* Rajan Vaja <rajan.vaja@xilinx.com>
*/
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+
+/**
+ * struct zynqmp_pm_work_struct - Wrapper for struct work_struct
+ * @callback_work: Work structure
+ * @args: Callback arguments
+ */
+struct zynqmp_pm_work_struct {
+ struct work_struct callback_work;
+ u32 args[CB_ARG_CNT];
+};
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+static struct mbox_chan *rx_chan;
+static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -31,7 +49,6 @@ static const char *const suspend_modes[] = {
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
@@ -68,6 +85,53 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static void ipi_receive_callback(struct mbox_client *cl, void *data)
+{
+ struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
+ u32 payload[IPI_BUF_LEN_MAX];
+ int ret;
+
+ /* msg->len is the payload length in bytes; cap it to the local buffer */
+ memcpy(payload, msg->data, min(msg->len, sizeof(payload)));
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq,
+ &zynqmp_pm_init_suspend_work->callback_work);
+
+ /* Send NULL message to mbox controller to ack the message */
+ ret = mbox_send_message(rx_chan, NULL);
+ if (ret)
+ pr_err("IPI ack failed. Error %d\n", ret);
+ }
+}
+
+/**
+ * zynqmp_pm_init_suspend_work_fn - Initialize suspend
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
+{
+ struct zynqmp_pm_work_struct *pm_work =
+ container_of(work, struct zynqmp_pm_work_struct, callback_work);
+
+ if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
+ orderly_poweroff(true);
+ } else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
+ pm_suspend(PM_SUSPEND_MEM);
+ } else {
+ pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
+ __func__, pm_work->args[0]);
+ }
+}
+
static ssize_t suspend_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -119,6 +183,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
{
int ret, irq;
u32 pm_api_version;
+ struct mbox_client *client;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
@@ -134,17 +199,46 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
-
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&pdev->dev), &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
- "with %d\n", irq, ret);
- return ret;
+ if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ zynqmp_pm_init_suspend_work =
+ devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work)
+ return -ENOMEM;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+ client->rx_callback = ipi_receive_callback;
+
+ rx_chan = mbox_request_channel_byname(client, "rx");
+ if (IS_ERR(rx_chan)) {
+ dev_err(&pdev->dev, "Failed to request rx channel\n");
+ return PTR_ERR(rx_chan);
+ }
+ } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
+ "failed with %d\n", irq, ret);
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "Required property not found in DT node\n");
+ return -ENOENT;
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
@@ -160,6 +254,9 @@ static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (rx_chan)
+ mbox_free_channel(rx_chan);
+
return 0;
}
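The mailbox rx callback runs in a context where sleeping is not allowed, which is why ipi_receive_callback() above only copies the arguments and queues work: pm_suspend() and orderly_poweroff() both sleep. The general shape of that deferral, reduced to its essentials with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/workqueue.h>

    struct deferred_event {
            struct work_struct work;
            u32 args[4];
    };

    static void event_worker(struct work_struct *work)
    {
            struct deferred_event *ev =
                    container_of(work, struct deferred_event, work);

            /* Process context: safe to sleep here, e.g. call pm_suspend() */
            (void)ev;
    }

    /* Called from atomic context: stash the payload and defer the rest */
    static void on_event(struct deferred_event *ev, const u32 *payload)
    {
            if (work_pending(&ev->work))
                    return;         /* previous event still in flight */
            memcpy(ev->args, payload, sizeof(ev->args));
            queue_work(system_unbound_wq, &ev->work);
    }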
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 30a40280c157..f6971237b7cc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -851,11 +851,18 @@ config SPI_XTENSA_XTFPGA
config SPI_ZYNQ_QSPI
tristate "Xilinx Zynq QSPI controller"
- depends on ARCH_ZYNQ || COMPILE_TEST
+ depends on ARCH_ZYNQ
+ depends on SPI_MASTER
help
- This enables support for the Zynq Quad SPI controller
- in master mode.
- This controller only supports SPI memory interface.
+ This selects the Xilinx ZYNQ Quad SPI controller master driver.
+
+config SPI_ZYNQ_QSPI_DUAL_STACKED
+ bool "Xilinx Zynq QSPI Dual stacked configuration"
+ depends on SPI_ZYNQ_QSPI
+ help
+ This selects the Xilinx ZYNQ Quad SPI controller in dual stacked mode.
+ Enable this option if your hw design is using dual stacked
+ configuration.
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index de0ba3e5449f..550f3ff36fa3 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -348,6 +348,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
+ xfers[xferpos].dummy = op->dummy.nbytes * 8;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
@@ -362,6 +363,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_nbits = op->data.buswidth;
}
+ xfers[xferpos].stripe = update_stripe(op->cmd.opcode);
xfers[xferpos].len = op->data.nbytes;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
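Both fields touched here, spi_transfer.dummy and spi_transfer.stripe, are Xilinx-specific extensions to struct spi_transfer introduced elsewhere in this patch set; neither exists in the upstream spi.h. The dummy field carries the dummy phase length in bits, which spi-xilinx.c later rescales by the receive bus width in xspi_start_transfer(). A reduced sketch of that consumer-side arithmetic (helper name hypothetical):

    static u32 xfer_fifo_bytes(u32 len, u32 dummy_bits, u32 rx_bus_width)
    {
            u32 dummy_bytes = dummy_bits / 8;

            /* e.g. len = 256 with a 64-bit dummy phase on a x4 bus:
             * (256 - 8) + 8 * 4 = 280 FIFO byte slots
             */
            return (len - dummy_bytes) + dummy_bytes * rx_bus_width;
    }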
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index d5f9d5fbb3e8..b3d2cc5cb6ec 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -16,10 +16,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/io.h>
-
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#define XILINX_SPI_MAX_CS 32
#define XILINX_SPI_NAME "xilinx_spi"
@@ -27,8 +28,18 @@
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
* Product Specification", DS464
*/
-#define XSPI_CR_OFFSET 0x60 /* Control Register */
-
+/* Register Offsets */
+#define XSPI_CR_OFFSET 0x60
+#define XSPI_SR_OFFSET 0x64
+#define XSPI_TXD_OFFSET 0x68
+#define XSPI_RXD_OFFSET 0x6c
+#define XSPI_SSR_OFFSET 0x70
+#define XIPIF_V123B_DGIER_OFFSET 0x1c
+#define XIPIF_V123B_IISR_OFFSET 0x20
+#define XIPIF_V123B_IIER_OFFSET 0x28
+#define XIPIF_V123B_RESETR_OFFSET 0x40
+
+/* Register bit masks */
#define XSPI_CR_LOOP 0x01
#define XSPI_CR_ENABLE 0x02
#define XSPI_CR_MASTER_MODE 0x04
@@ -41,133 +52,203 @@
#define XSPI_CR_MANUAL_SSELECT 0x80
#define XSPI_CR_TRANS_INHIBIT 0x100
#define XSPI_CR_LSB_FIRST 0x200
-
-#define XSPI_SR_OFFSET 0x64 /* Status Register */
-
-#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
-#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
-#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
-#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
-#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
-
-#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
-#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
-
-#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
-
-/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
- * IPIF registers are 32 bit
- */
-#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
+#define XSPI_SR_RX_EMPTY_MASK 0x01
+#define XSPI_SR_RX_FULL_MASK 0x02
+#define XSPI_SR_TX_EMPTY_MASK 0x04
+#define XSPI_SR_TX_FULL_MASK 0x08
+#define XSPI_SR_MODE_FAULT_MASK 0x10
#define XIPIF_V123B_GINTR_ENABLE 0x80000000
-
-#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
-#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
-
-#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
-#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
- * disabled */
-#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
-#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
-#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
-#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
-#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
-
-#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
-#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
-
+#define XSPI_INTR_MODE_FAULT 0x01
+#define XSPI_INTR_SLAVE_MODE_FAULT 0x02
+#define XSPI_INTR_TX_EMPTY 0x04
+#define XSPI_INTR_TX_UNDERRUN 0x08
+#define XSPI_INTR_RX_FULL 0x10
+#define XSPI_INTR_RX_OVERRUN 0x20
+#define XSPI_INTR_TX_HALF_EMPTY 0x40
+#define XIPIF_V123B_RESET_MASK 0x0a
+
+/* Number of bits per word */
+#define XSPI_ONE_BITS_PER_WORD 1
+#define XSPI_TWO_BITS_PER_WORD 2
+#define XSPI_FOUR_BITS_PER_WORD 4
+
+/* Number of data lines used to receive */
+#define XSPI_RX_ONE_WIRE 1
+#define XSPI_RX_FOUR_WIRE 4
+
+/* Auto suspend timeout in milliseconds */
+#define SPI_AUTOSUSPEND_TIMEOUT 3000
+
+/* Command used for the dummy Read ID (JEDEC 0x9F) cycle */
+#define SPI_READ_ID 0x9F
+
+/**
+ * struct xilinx_spi - SPI driver instance data
+ * @regs: virt. address of the control registers
+ * @irq: IRQ number
+ * @axi_clk: Pointer to the AXI clock
+ * @axi4_clk: Pointer to the AXI4 clock
+ * @spi_clk: Pointer to the SPI clock
+ * @dev: Pointer to the device
+ * @rx_ptr: Pointer to the RX buffer
+ * @tx_ptr: Pointer to the TX buffer
+ * @bytes_per_word: Number of bytes in a word
+ * @buffer_size: Buffer size in words
+ * @cs_inactive: Level of the CS pins when inactive
+ * @read_fn: For reading data from SPI registers
+ * @write_fn: For writing data to SPI registers
+ * @bytes_to_transfer: Number of bytes left to transfer
+ * @bytes_to_receive: Number of bytes left to receive
+ * @rx_bus_width: Number of wires used to receive data
+ * @tx_fifo: For writing data to fifo
+ * @rx_fifo: For reading data from fifo
+ */
struct xilinx_spi {
- /* bitbang has to be first */
- struct spi_bitbang bitbang;
- struct completion done;
- void __iomem *regs; /* virt. address of the control registers */
-
- int irq;
-
- u8 *rx_ptr; /* pointer in the Tx buffer */
- const u8 *tx_ptr; /* pointer in the Rx buffer */
+ void __iomem *regs;
+ int irq;
+ struct clk *axi_clk;
+ struct clk *axi4_clk;
+ struct clk *spi_clk;
+ struct device *dev;
+ u8 *rx_ptr;
+ const u8 *tx_ptr;
u8 bytes_per_word;
- int buffer_size; /* buffer size in words */
- u32 cs_inactive; /* Level of the CS pins when inactive*/
- unsigned int (*read_fn)(void __iomem *);
- void (*write_fn)(u32, void __iomem *);
+ int buffer_size;
+ u32 cs_inactive;
+ unsigned int (*read_fn)(void __iomem *addr);
+ void (*write_fn)(u32, void __iomem *addr);
+ u32 bytes_to_transfer;
+ u32 bytes_to_receive;
+ u32 rx_bus_width;
+ void (*tx_fifo)(struct xilinx_spi *xqspi);
+ void (*rx_fifo)(struct xilinx_spi *xqspi);
};
+/**
+ * XSPI_FIFO_READ - Generate xspi_read_rx_fifo_* functions
+ * @size: bits_per_word that are read from RX FIFO
+ * @type: C type of value argument
+ *
+ * Generates xspi_read_rx_fifo_* functions used to read
+ * data from the RX FIFO for different transaction widths.
+ */
+#define XSPI_FIFO_READ(size, type) \
+static void xspi_read_rx_fifo_##size(struct xilinx_spi *xqspi) \
+{ \
+ int i; \
+ int count = (xqspi->bytes_to_receive > xqspi->buffer_size) ? \
+ xqspi->buffer_size : xqspi->bytes_to_receive; \
+ u32 data; \
+ for (i = 0; i < count; i += (size/8)) { \
+ data = readl_relaxed(xqspi->regs + XSPI_RXD_OFFSET); \
+ if (xqspi->rx_ptr) \
+ *(type *)(xqspi->rx_ptr + i) = (type)data; \
+ } \
+ xqspi->bytes_to_receive -= count; \
+ if (xqspi->rx_ptr) \
+ xqspi->rx_ptr += count; \
+}
+
+/**
+ * XSPI_FIFO_WRITE - Generate xspi_fill_tx_fifo_* functions
+ * @size: bits_per_word that are written into TX FIFO
+ * @type: C type of value argument
+ *
+ * Generates xspi_fill_tx_fifo_* functions used to write
+ * data into TX FIFO for different transaction widths.
+ */
+#define XSPI_FIFO_WRITE(size, type) \
+static void xspi_fill_tx_fifo_##size(struct xilinx_spi *xqspi) \
+{ \
+ int i; \
+ int count = (xqspi->bytes_to_transfer > xqspi->buffer_size) ? \
+ xqspi->buffer_size : xqspi->bytes_to_transfer; \
+ u32 data = 0; \
+ for (i = 0; i < count; i += (size/8)) { \
+ if (xqspi->tx_ptr) \
+ data = *(const type *)(xqspi->tx_ptr + i); \
+ writel_relaxed(data, (xqspi->regs + XSPI_TXD_OFFSET)); \
+ } \
+ xqspi->bytes_to_transfer -= count; \
+ if (xqspi->tx_ptr) \
+ xqspi->tx_ptr += count; \
+}
+
+XSPI_FIFO_READ(8, u8)
+XSPI_FIFO_READ(16, u16)
+XSPI_FIFO_READ(32, u32)
+XSPI_FIFO_WRITE(8, u8)
+XSPI_FIFO_WRITE(16, u16)
+XSPI_FIFO_WRITE(32, u32)
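+
+/*
+ * XSPI_FIFO_WRITE(8, u8), for instance, expands to a function named
+ * xspi_fill_tx_fifo_8() that moves up to buffer_size bytes into the
+ * FIFO one word at a time, with i advancing by the byte width of the
+ * word. The probe path (outside this hunk) is expected to bind the
+ * tx_fifo/rx_fifo callbacks to the variant matching the transfer's
+ * bits_per_word, e.g.:
+ *
+ *	xqspi->tx_fifo = xspi_fill_tx_fifo_8;
+ *	xqspi->rx_fifo = xspi_read_rx_fifo_8;
+ */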
+
+/**
+ * xspi_write32 - Write a value to the device register little endian
+ * @val: Value to write at the Register offset
+ * @addr: Register offset
+ *
+ * Write data to the particular SPI register
+ */
static void xspi_write32(u32 val, void __iomem *addr)
{
iowrite32(val, addr);
}
+/**
+ * xspi_read32 - read a value from the device register little endian
+ * @addr: Register offset
+ *
+ * Read data from the particular SPI register
+ *
+ * Return: return value from the SPI register.
+ */
static unsigned int xspi_read32(void __iomem *addr)
{
return ioread32(addr);
}
+/**
+ * xspi_write32_be - Write a value to the device register big endian
+ * @val: Value to write at the Register offset
+ * @addr: Register offset
+ *
+ * Write data to the particular SPI register
+ */
static void xspi_write32_be(u32 val, void __iomem *addr)
{
iowrite32be(val, addr);
}
+/**
+ * xspi_read32_be - read a value from the device register big endian
+ * @addr: Register offset
+ *
+ * Read data from the particular SPI register
+ *
+ * Return: return value from the SPI register.
+ */
static unsigned int xspi_read32_be(void __iomem *addr)
{
return ioread32be(addr);
}
-static void xilinx_spi_tx(struct xilinx_spi *xspi)
-{
- u32 data = 0;
-
- if (!xspi->tx_ptr) {
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- return;
- }
-
- switch (xspi->bytes_per_word) {
- case 1:
- data = *(u8 *)(xspi->tx_ptr);
- break;
- case 2:
- data = *(u16 *)(xspi->tx_ptr);
- break;
- case 4:
- data = *(u32 *)(xspi->tx_ptr);
- break;
- }
-
- xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
- xspi->tx_ptr += xspi->bytes_per_word;
-}
-
-static void xilinx_spi_rx(struct xilinx_spi *xspi)
-{
- u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-
- if (!xspi->rx_ptr)
- return;
-
- switch (xspi->bytes_per_word) {
- case 1:
- *(u8 *)(xspi->rx_ptr) = data;
- break;
- case 2:
- *(u16 *)(xspi->rx_ptr) = data;
- break;
- case 4:
- *(u32 *)(xspi->rx_ptr) = data;
- break;
- }
-
- xspi->rx_ptr += xspi->bytes_per_word;
-}
-
+/**
+ * xspi_init_hw - Initialize the hardware
+ * @xspi: Pointer to the xilinx_spi structure
+ *
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable the SPI controller
+ */
static void xspi_init_hw(struct xilinx_spi *xspi)
{
void __iomem *regs_base = xspi->regs;
/* Reset the SPI device */
xspi->write_fn(XIPIF_V123B_RESET_MASK,
- regs_base + XIPIF_V123B_RESETR_OFFSET);
- /* Enable the transmit empty interrupt, which we use to determine
+ regs_base + XIPIF_V123B_RESETR_OFFSET);
+ /*
+ * Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
*/
xspi->write_fn(XSPI_INTR_TX_EMPTY,
@@ -176,262 +257,457 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
/* Deselect the slave on the SPI bus */
xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
- /* Disable the transmitter, enable Manual Slave Select Assertion,
- * put SPI controller into master mode, and enable it */
+ /*
+ * Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it
+ */
xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
regs_base + XSPI_CR_OFFSET);
}
-static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+/**
+ * xspi_chipselect - Select or deselect the chip select line
+ * @qspi: Pointer to the spi_device structure
+ * @is_high: Select (0) or deselect (1) the chip select line
+ *
+ */
+static void xspi_chipselect(struct spi_device *qspi, bool is_high)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u16 cr;
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
u32 cs;
- if (is_on == BITBANG_CS_INACTIVE) {
- /* Deselect the slave on the SPI bus */
- xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
- return;
+ if (is_high) {
+ /* Deselect the slave */
+ xqspi->write_fn(xqspi->cs_inactive,
+ xqspi->regs + XSPI_SSR_OFFSET);
+ } else {
+ cs = xqspi->cs_inactive;
+ cs ^= BIT(qspi->chip_select);
+ /* Activate the chip select */
+ xqspi->write_fn(cs, xqspi->regs + XSPI_SSR_OFFSET);
}
+}
- /* Set the SPI clock phase and polarity */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
- if (spi->mode & SPI_CPHA)
- cr |= XSPI_CR_CPHA;
- if (spi->mode & SPI_CPOL)
- cr |= XSPI_CR_CPOL;
- if (spi->mode & SPI_LSB_FIRST)
- cr |= XSPI_CR_LSB_FIRST;
- if (spi->mode & SPI_LOOP)
- cr |= XSPI_CR_LOOP;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-
- /* We do not check spi->max_speed_hz here as the SPI clock
- * frequency is not software programmable (the IP block design
- * parameter)
- */
+/**
+ * xilinx_spi_irq - Interrupt service routine of the SPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xspi structure
+ *
+ * This function handles TX empty only.
+ * On TX empty interrupt this function reads the received data from RX FIFO
+ * and fills the TX FIFO if there is any data remaining to be transferred.
+ *
+ * Return: IRQ_HANDLED when interrupt is handled
+ * IRQ_NONE otherwise.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct xilinx_spi *xspi = spi_master_get_devdata(dev_id);
+ u32 ipif_isr;
+ int status = IRQ_NONE;
- cs = xspi->cs_inactive;
- cs ^= BIT(spi->chip_select);
+ /* Get the IPIF interrupts, and clear them immediately */
+ ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) {
+ /* Transmission completed */
+ xspi->rx_fifo(xspi);
+ if (xspi->bytes_to_transfer) {
+ /* There is more data to send */
+ xspi->tx_fifo(xspi);
+ }
+ status = IRQ_HANDLED;
+ }
- /* Activate the chip select */
- xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
+ if (!xspi->bytes_to_receive && !xspi->bytes_to_transfer) {
+ spi_finalize_current_transfer(master);
+ /* Disable the interrupts here. */
+ xspi->write_fn(0x0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ }
+
+ return status;
}
-/* spi_bitbang requires custom setup_transfer() to be defined if there is a
- * custom txrx_bufs().
+/**
+ * xilinx_spi_startup_block - Perform a dummy read as a
+ * workaround for the startup block issue in the SPI controller.
+ * @xspi: Pointer to the xilinx_spi structure
+ * @cs_num: chip select number.
+ *
+ * Perform a dummy read if startup block is enabled in the
+ * spi controller.
+ *
+ * Return: None
*/
-static int xilinx_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
+static void xilinx_spi_startup_block(struct xilinx_spi *xspi, u32 cs_num)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ void __iomem *regs_base = xspi->regs;
+ u32 chip_sel, config_reg, status_reg;
- if (spi->mode & SPI_CS_HIGH)
- xspi->cs_inactive &= ~BIT(spi->chip_select);
+ /* Activate the chip select */
+ chip_sel = xspi->cs_inactive;
+ chip_sel ^= BIT(cs_num);
+ xspi->write_fn(chip_sel, regs_base + XSPI_SSR_OFFSET);
+
+ /* Write ReadId to the TXD register */
+ xspi->write_fn(SPI_READ_ID, regs_base + XSPI_TXD_OFFSET);
+ xspi->write_fn(0x0, regs_base + XSPI_TXD_OFFSET);
+ xspi->write_fn(0x0, regs_base + XSPI_TXD_OFFSET);
+
+ config_reg = xspi->read_fn(regs_base + XSPI_CR_OFFSET);
+ /* Enable master transaction */
+ config_reg &= ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(config_reg, regs_base + XSPI_CR_OFFSET);
+
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ while ((status_reg & XSPI_SR_TX_EMPTY_MASK) == 0)
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+
+ /* Disable master transaction */
+ config_reg |= XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(config_reg, regs_base + XSPI_CR_OFFSET);
+
+ /* Read the RXD Register */
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ while ((status_reg & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ xspi->read_fn(regs_base + XSPI_RXD_OFFSET);
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ }
+
+ xspi_init_hw(xspi);
+}
+
+/**
+ * xspi_setup_transfer - Configure SPI controller for specified
+ * transfer
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI
+ * transfer.
+ *
+ * Return: 0 always
+ */
+static int xspi_setup_transfer(struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
+ u32 config_reg;
+
+ config_reg = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= ~(XSPI_CR_CPHA | XSPI_CR_CPOL);
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= XSPI_CR_CPHA;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= XSPI_CR_CPOL;
+ if (qspi->mode & SPI_LSB_FIRST)
+ config_reg |= XSPI_CR_LSB_FIRST;
+ xqspi->write_fn(config_reg, xqspi->regs + XSPI_CR_OFFSET);
+
+ if (qspi->mode & SPI_CS_HIGH)
+ xqspi->cs_inactive &= ~BIT(qspi->chip_select);
else
- xspi->cs_inactive |= BIT(spi->chip_select);
+ xqspi->cs_inactive |= BIT(qspi->chip_select);
return 0;
}
-static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+/**
+ * xspi_setup - Configure the SPI controller
+ * @qspi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI
+ * transfer.
+ *
+ * Return: 0 on success; error value otherwise.
+ */
+static int xspi_setup(struct spi_device *qspi)
+{
+ int ret;
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
+
+ if (qspi->master->busy)
+ return -EBUSY;
+
+ ret = pm_runtime_get_sync(xqspi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = xspi_setup_transfer(qspi, NULL);
+ pm_runtime_put_sync(xqspi->dev);
+
+ return ret;
+}
+
+/**
+ * xspi_start_transfer - Initiates the SPI transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ * @qspi: Pointer to the spi_device structure
+ * @transfer:	Pointer to the spi_transfer structure which provides
+ * information about the next transfer parameters
+ *
+ * This function fills the TX FIFO and starts the SPI transfer; the
+ * transfer then completes asynchronously from the interrupt handler.
+ *
+ * Return: Number of bytes in the requested transfer
+ */
+static int xspi_start_transfer(struct spi_master *master,
+ struct spi_device *qspi,
+ struct spi_transfer *transfer)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- int remaining_words; /* the number of words left to transfer */
- bool use_irq = false;
- u16 cr = 0;
-
- /* We get here with transmitter inhibited */
-
- xspi->tx_ptr = t->tx_buf;
- xspi->rx_ptr = t->rx_buf;
- remaining_words = t->len / xspi->bytes_per_word;
-
- if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
- u32 isr;
- use_irq = true;
- /* Inhibit irq to avoid spurious irqs on tx_empty*/
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
- /* ACK old irqs (if any) */
- isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- if (isr)
- xspi->write_fn(isr,
- xspi->regs + XIPIF_V123B_IISR_OFFSET);
- /* Enable the global IPIF interrupt */
- xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
- xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- reinit_completion(&xspi->done);
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
+ u32 cr;
+
+ xqspi->tx_ptr = transfer->tx_buf;
+ xqspi->rx_ptr = transfer->rx_buf;
+
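+	/*
+	 * transfer->dummy holds dummy clock cycles; the equivalent byte
+	 * count (dummy / 8) is scaled by the RX bus width when computing
+	 * the bytes actually clocked on the wire.
+	 */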
+ if (transfer->dummy) {
+		xqspi->bytes_to_transfer = (transfer->len - (transfer->dummy / 8)) +
+					   ((transfer->dummy / 8) *
+					    xqspi->rx_bus_width);
+		xqspi->bytes_to_receive = (transfer->len - (transfer->dummy / 8)) +
+					  ((transfer->dummy / 8) *
+					   xqspi->rx_bus_width);
+ } else {
+ xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_receive = transfer->len;
}
- while (remaining_words) {
- int n_words, tx_words, rx_words;
- u32 sr;
- int stalled;
+ xspi_setup_transfer(qspi, transfer);
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ /* Enable master transaction inhibit */
+ cr |= XSPI_CR_TRANS_INHIBIT;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+ xqspi->tx_fifo(xqspi);
+ /* Disable master transaction inhibit */
+ cr &= ~XSPI_CR_TRANS_INHIBIT;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+ xqspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xqspi->regs + XIPIF_V123B_DGIER_OFFSET);
+
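+	/*
+	 * Returning a positive value tells the SPI core the transfer is in
+	 * progress; it is finalized from the interrupt handler.
+	 */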
+ return transfer->len;
+}
- n_words = min(remaining_words, xspi->buffer_size);
+/**
+ * xspi_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables the SPI master controller.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int xspi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
- tx_words = n_words;
- while (tx_words--)
- xilinx_spi_tx(xspi);
+ u32 cr;
+ int ret;
- /* Start the transfer by not inhibiting the transmitter any
- * longer
- */
+ ret = pm_runtime_get_sync(xqspi->dev);
+ if (ret < 0)
+ return ret;
- if (use_irq) {
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- wait_for_completion(&xspi->done);
- /* A transmit has just completed. Process received data
- * and check for more data to transmit. Always inhibit
- * the transmitter while the Isr refills the transmit
- * register/FIFO, or make sure it is stopped if we're
- * done.
- */
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
- sr = XSPI_SR_TX_EMPTY_MASK;
- } else
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-
- /* Read out all the data from the Rx FIFO */
- rx_words = n_words;
- stalled = 10;
- while (rx_words) {
- if (rx_words == n_words && !(stalled--) &&
- !(sr & XSPI_SR_TX_EMPTY_MASK) &&
- (sr & XSPI_SR_RX_EMPTY_MASK)) {
- dev_err(&spi->dev,
- "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
- xspi_init_hw(xspi);
- return -EIO;
- }
-
- if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
- xilinx_spi_rx(xspi);
- rx_words--;
- continue;
- }
-
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
- xilinx_spi_rx(xspi);
- rx_words--;
- }
- }
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ cr |= XSPI_CR_ENABLE;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+
+ return 0;
+}
+
+/**
+ * xspi_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int xspi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
+ u32 cr;
+
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ cr &= ~XSPI_CR_ENABLE;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+
+ pm_runtime_put_sync(xqspi->dev);
+
+ return 0;
+}
+
+/**
+ * xilinx_spi_runtime_resume - Runtime resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function enables the clocks.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused xilinx_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ int ret;
- remaining_words -= n_words;
+ ret = clk_enable(xspi->axi_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable AXI clock\n");
+ return ret;
}
- if (use_irq) {
- xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ ret = clk_enable(xspi->axi4_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable AXI4 clock\n");
+ goto clk_disable_axi_clk;
}
- return t->len;
+ ret = clk_enable(xspi->spi_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable SPI clock\n");
+ goto clk_disable_axi4_clk;
+ }
+
+ return 0;
+
+clk_disable_axi4_clk:
+ clk_disable(xspi->axi4_clk);
+clk_disable_axi_clk:
+ clk_disable(xspi->axi_clk);
+
+ return ret;
}
+/**
+ * xilinx_spi_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function disables the clocks.
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused xilinx_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+
+ clk_disable(xspi->axi_clk);
+ clk_disable(xspi->axi4_clk);
+ clk_disable(xspi->spi_clk);
+
+ return 0;
+}
-/* This driver supports single master mode only. Hence Tx FIFO Empty
- * is the only interrupt we care about.
- * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
- * Fault are not to happen.
+/**
+ * xilinx_spi_resume - Resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * The function starts the SPI driver queue and initializes the SPI
+ * controller.
+ *
+ * Return: 0 on success; error value otherwise
*/
-static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+static int __maybe_unused xilinx_spi_resume(struct device *dev)
{
- struct xilinx_spi *xspi = dev_id;
- u32 ipif_isr;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ int ret = 0;
- /* Get the IPIF interrupts, and clear them immediately */
- ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ if (!pm_runtime_suspended(dev)) {
+ ret = xilinx_spi_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
- if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
- complete(&xspi->done);
- return IRQ_HANDLED;
+ ret = spi_master_resume(master);
+ if (ret < 0) {
+ clk_disable(xspi->axi_clk);
+ clk_disable(xspi->axi4_clk);
+ clk_disable(xspi->spi_clk);
}
- return IRQ_NONE;
+ return ret;
}
-static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+
+/**
+ * xilinx_spi_suspend - Suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function stops the SPI driver queue and disables the SPI controller
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused xilinx_spi_suspend(struct device *dev)
{
- u8 sr;
- int n_words = 0;
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret = 0;
- /*
- * Before the buffer_size detection we reset the core
- * to make sure we start with a clean state.
- */
- xspi->write_fn(XIPIF_V123B_RESET_MASK,
- xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ xilinx_spi_runtime_suspend(dev);
- /* Fill the Tx FIFO with as many words as possible */
- do {
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- n_words++;
- } while (!(sr & XSPI_SR_TX_FULL_MASK));
+ xspi_unprepare_transfer_hardware(master);
- return n_words;
+ return ret;
}
-static const struct of_device_id xilinx_spi_of_match[] = {
- { .compatible = "xlnx,axi-quad-spi-1.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.b", },
- {}
+static const struct dev_pm_ops xilinx_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(xilinx_spi_runtime_suspend,
+ xilinx_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_spi_suspend, xilinx_spi_resume)
};
-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+/**
+ * xilinx_spi_probe - Probe method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success; error value otherwise
+ */
static int xilinx_spi_probe(struct platform_device *pdev)
{
struct xilinx_spi *xspi;
- struct xspi_platform_data *pdata;
struct resource *res;
int ret, num_cs = 0, bits_per_word = 8;
+ u32 cs_num;
struct spi_master *master;
- u32 tmp;
- u8 i;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata) {
- num_cs = pdata->num_chipselect;
- bits_per_word = pdata->bits_per_word;
- } else {
- of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
- &num_cs);
- }
+ struct device_node *nc;
+ u32 tmp, rx_bus_width, fifo_size;
+ bool startup_block;
- if (!num_cs) {
- dev_err(&pdev->dev,
- "Missing slave select configuration data\n");
- return -EINVAL;
- }
+ of_property_read_u32(pdev->dev.of_node, "num-cs",
+ &num_cs);
+ if (!num_cs)
+ num_cs = 1;
if (num_cs > XILINX_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
return -EINVAL;
}
+ startup_block = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,startup-block");
+
master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
- SPI_CS_HIGH;
-
xspi = spi_master_get_devdata(master);
- xspi->cs_inactive = 0xffffffff;
- xspi->bitbang.master = master;
- xspi->bitbang.chipselect = xilinx_spi_chipselect;
- xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
- xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
- init_completion(&xspi->done);
-
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xspi->regs)) {
@@ -439,20 +715,104 @@ static int xilinx_spi_probe(struct platform_device *pdev)
goto put_master;
}
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
- master->dev.of_node = pdev->dev.of_node;
+ ret = of_property_read_u32(pdev->dev.of_node, "fifo-size",
+ &fifo_size);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Missing fifo size\n");
+		ret = -EINVAL;
+		goto put_master;
+	}
+ of_property_read_u32(pdev->dev.of_node, "bits-per-word",
+ &bits_per_word);
+
+ xspi->rx_bus_width = XSPI_ONE_BITS_PER_WORD;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ if (startup_block) {
+ ret = of_property_read_u32(nc, "reg",
+ &cs_num);
+			if (ret < 0) {
+				of_node_put(nc);
+				ret = -EINVAL;
+				goto put_master;
+			}
+		}
+		ret = of_property_read_u32(nc, "spi-rx-bus-width",
+					   &rx_bus_width);
+		if (!ret) {
+			xspi->rx_bus_width = rx_bus_width;
+			of_node_put(nc);
+			break;
+		}
+ }
+
+ xspi->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ if (IS_ERR(xspi->axi_clk)) {
+ if (PTR_ERR(xspi->axi_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->axi_clk);
+ goto put_master;
+ }
+
+ /*
+	 * Clock framework support is optional; continue on
+	 * anyway if we don't find a matching clock.
+ */
+ xspi->axi_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare AXI clock\n");
+ goto put_master;
+ }
+
+ xspi->axi4_clk = devm_clk_get(&pdev->dev, "axi4_clk");
+ if (IS_ERR(xspi->axi4_clk)) {
+ if (PTR_ERR(xspi->axi4_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->axi4_clk);
+ goto clk_unprepare_axi_clk;
+ }
+
+ /*
+	 * Clock framework support is optional; continue on
+	 * anyway if we don't find a matching clock.
+ */
+ xspi->axi4_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->axi4_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare AXI4 clock\n");
+ goto clk_unprepare_axi_clk;
+ }
+
+ xspi->spi_clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(xspi->spi_clk)) {
+ if (PTR_ERR(xspi->spi_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->spi_clk);
+ goto clk_unprepare_axi4_clk;
+ }
+
+ /*
+	 * Clock framework support is optional; continue on
+	 * anyway if we don't find a matching clock.
+ */
+ xspi->spi_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->spi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare SPI clock\n");
+ goto clk_unprepare_axi4_clk;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto clk_unprepare_all;
+
+ xspi->dev = &pdev->dev;
- /*
- * Detect endianess on the IP via loop bit in CR. Detection
- * must be done before reset is sent because incorrect reset
- * value generates error interrupt.
- * Setup little endian helper functions first and try to use them
- * and check if bit was correctly setup or not.
- */
xspi->read_fn = xspi_read32;
xspi->write_fn = xspi_write32;
-
+ /* Detect endianness on the IP via loop bit in CR register*/
xspi->write_fn(XSPI_CR_LOOP, xspi->regs + XSPI_CR_OFFSET);
tmp = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
tmp &= XSPI_CR_LOOP;
@@ -461,62 +821,112 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
- master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
- xspi->bytes_per_word = bits_per_word / 8;
- xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
-
+ xspi->buffer_size = fifo_size;
xspi->irq = platform_get_irq(pdev, 0);
if (xspi->irq < 0 && xspi->irq != -ENXIO) {
ret = xspi->irq;
- goto put_master;
+ goto clk_unprepare_all;
} else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
- ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
- dev_name(&pdev->dev), xspi);
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq,
+ 0, dev_name(&pdev->dev), master);
if (ret)
- goto put_master;
+ goto clk_unprepare_all;
}
/* SPI controller initializations */
xspi_init_hw(xspi);
- ret = spi_bitbang_start(&xspi->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
- goto put_master;
+ pm_runtime_put(&pdev->dev);
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->setup = xspi_setup;
+ master->set_cs = xspi_chipselect;
+ master->transfer_one = xspi_start_transfer;
+ master->prepare_transfer_hardware = xspi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware = xspi_unprepare_transfer_hardware;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ xspi->bytes_per_word = bits_per_word / 8;
+ xspi->tx_fifo = xspi_fill_tx_fifo_8;
+ xspi->rx_fifo = xspi_read_rx_fifo_8;
+ if (xspi->rx_bus_width == XSPI_RX_ONE_WIRE) {
+ if (xspi->bytes_per_word == XSPI_TWO_BITS_PER_WORD) {
+ xspi->tx_fifo = xspi_fill_tx_fifo_16;
+ xspi->rx_fifo = xspi_read_rx_fifo_16;
+ } else if (xspi->bytes_per_word == XSPI_FOUR_BITS_PER_WORD) {
+ xspi->tx_fifo = xspi_fill_tx_fifo_32;
+ xspi->rx_fifo = xspi_read_rx_fifo_32;
+ }
+ } else if (xspi->rx_bus_width == XSPI_RX_FOUR_WIRE) {
+ master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+	} else {
+		dev_err(&pdev->dev, "Dual Mode not supported\n");
+		ret = -EINVAL;
+		goto clk_unprepare_all;
}
+ xspi->cs_inactive = 0xffffffff;
- dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
- (unsigned long long)res->start, xspi->regs, xspi->irq);
+ /*
+	 * This is the workaround for the startup block issue in
+	 * the SPI controller. The SPI clock passes through the
+	 * STARTUP block to the flash. The STARTUP block does not
+	 * provide the clock as soon as the QSPI issues a command,
+	 * so the first command fails.
+ */
+ if (startup_block)
+ xilinx_spi_startup_block(xspi, cs_num);
- if (pdata) {
- for (i = 0; i < pdata->num_devices; i++)
- spi_new_device(master, pdata->devices + i);
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_unprepare_all;
}
- platform_set_drvdata(pdev, master);
- return 0;
+ return ret;
+clk_unprepare_all:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_unprepare(xspi->spi_clk);
+clk_unprepare_axi4_clk:
+ clk_unprepare(xspi->axi4_clk);
+clk_unprepare_axi_clk:
+ clk_unprepare(xspi->axi_clk);
put_master:
spi_master_put(master);
return ret;
}
+/**
+ * xilinx_spi_remove - Remove method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 Always
+ */
static int xilinx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct xilinx_spi *xspi = spi_master_get_devdata(master);
void __iomem *regs_base = xspi->regs;
- spi_bitbang_stop(&xspi->bitbang);
-
/* Disable all the interrupts just in case */
xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- spi_master_put(xspi->bitbang.master);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(xspi->axi_clk);
+ clk_disable_unprepare(xspi->axi4_clk);
+ clk_disable_unprepare(xspi->spi_clk);
+
+ spi_unregister_master(master);
return 0;
}
@@ -524,12 +934,21 @@ static int xilinx_spi_remove(struct platform_device *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
.remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.of_match_table = xilinx_spi_of_match,
+ .pm = &xilinx_spi_dev_pm_ops,
},
};
module_platform_driver(xilinx_spi_driver);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index c6bee67decb5..2041d5dcf294 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -129,6 +129,9 @@
* @rxbuf: Pointer to the RX buffer
* @tx_bytes: Number of bytes left to transfer
* @rx_bytes: Number of bytes left to receive
+ * @is_dual: Flag to indicate whether dual flash memories are used
+ * @is_instr: Flag to indicate if transfer contains an instruction
+ * (Used in dual parallel configuration)
* @data_completion: completion structure
*/
struct zynq_qspi {
@@ -141,6 +144,8 @@ struct zynq_qspi {
u8 *rxbuf;
int tx_bytes;
int rx_bytes;
+ u32 is_dual;
+ u8 is_instr;
struct completion data_completion;
};
@@ -213,6 +218,14 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi)
zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
ZYNQ_QSPI_TX_THRESHOLD);
+ if (xqspi->is_dual)
+ /* Enable two memories on separate buses */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ (ZYNQ_QSPI_LCFG_TWO_MEM_MASK |
+ ZYNQ_QSPI_LCFG_SEP_BUS_MASK |
+ (1 << ZYNQ_QSPI_LCFG_DUMMY_SHIFT) |
+ ZYNQ_QSPI_FAST_READ_QOUT_CODE));
+
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}
@@ -236,15 +249,23 @@ static bool zynq_qspi_supports_op(struct spi_mem *mem,
* zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be read (1..4)
+ *
+ * Note: In case of dual parallel connection, even number of bytes are read
+ * when odd bytes are requested to avoid transfer of a nibble to each flash.
+ * The receive buffer though, is populated with the number of bytes requested.
*/
static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
+ unsigned int xsize;
u32 data;
data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
if (xqspi->rxbuf) {
- memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
+ xsize = size;
+ if (xqspi->is_dual && !xqspi->is_instr && (size % 2))
+ xsize++;
+ memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - xsize, size);
xqspi->rxbuf += size;
}
@@ -257,12 +278,19 @@ static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
* zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be written (1..4)
+ *
+ * In dual parallel configuration, when read/write data operations
+ * are performed, odd data bytes have to be converted to even to
+ * avoid a nibble (of data when programming / dummy when reading)
+ * going to individual flash devices, where a byte is expected.
+ * This check is only for data and will not apply for commands.
*/
static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
static const unsigned int offset[4] = {
ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
+ unsigned int xsize;
u32 data;
if (xqspi->txbuf) {
@@ -274,7 +302,11 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
}
xqspi->tx_bytes -= size;
- zynq_qspi_write(xqspi, offset[size - 1], data);
+
+ xsize = size;
+ if (xqspi->is_dual && !xqspi->is_instr && (size % 2))
+ xsize++;
+ zynq_qspi_write(xqspi, offset[xsize - 1], data);
}
/**
@@ -295,6 +327,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
config_reg |= (((~(BIT(spi->chip_select))) <<
ZYNQ_QSPI_SS_SHIFT) &
ZYNQ_QSPI_CONFIG_SSCTRL_MASK);
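+		/* A chip-select assertion starts a new instruction phase */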
+ xqspi->is_instr = 1;
} else {
config_reg |= ZYNQ_QSPI_CONFIG_SSCTRL_MASK;
}
@@ -637,6 +670,12 @@ static int zynq_qspi_probe(struct platform_device *pdev)
goto remove_master;
}
+ if (of_property_read_u32(pdev->dev.of_node, "is-dual",
+ &xqspi->is_dual)) {
+ dev_warn(&pdev->dev, "couldn't determine configuration info");
+ dev_warn(&pdev->dev, "about dual memories. defaulting to single memory\n");
+ }
+
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index ddf408a6b60c..2b794f57964d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -33,6 +34,7 @@
#define GQSPI_RXD_OFST 0x00000120
#define GQSPI_TX_THRESHOLD_OFST 0x00000128
#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
+#define IOU_TAPDLY_BYPASS_OFST 0x0000003C
#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
#define GQSPI_GEN_FIFO_OFST 0x00000140
#define GQSPI_SEL_OFST 0x00000144
@@ -47,6 +49,7 @@
#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
+#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
/* GQSPI register bit masks */
#define GQSPI_SEL_MASK 0x00000001
@@ -132,12 +135,44 @@
#define GQSPI_SELECT_MODE_QUADSPI 0x4
#define GQSPI_DMA_UNALIGN 0x3
#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
+#define GQSPI_RX_BUS_WIDTH_QUAD 0x4
+#define GQSPI_RX_BUS_WIDTH_DUAL 0x2
+#define GQSPI_RX_BUS_WIDTH_SINGLE 0x1
+#define GQSPI_TX_BUS_WIDTH_QUAD 0x4
+#define GQSPI_TX_BUS_WIDTH_DUAL 0x2
+#define GQSPI_TX_BUS_WIDTH_SINGLE 0x1
+#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT 5
+#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x2
+#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 3
+#define GQSPI_LPBK_DLY_ADJ_DLY_0 0x3
+#define GQSPI_USE_DATA_DLY 0x1
+#define GQSPI_USE_DATA_DLY_SHIFT 31
+#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
+#define GQSPI_DATA_DLY_ADJ_SHIFT 28
+#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
+#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 2
+
+/* Quirk to differentiate Versal from ZynqMP: set on Versal, clear on ZynqMP */
+#define QSPI_QUIRK_HAS_TAPDELAY BIT(0)
+
+#define GQSPI_FREQ_40MHZ 40000000
+#define GQSPI_FREQ_100MHZ 100000000
+#define GQSPI_FREQ_150MHZ 150000000
+#define IOU_TAPDLY_BYPASS_MASK 0x7
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
static const struct zynqmp_eemi_ops *eemi_ops;
/**
+ * struct qspi_platform_data - zynqmp qspi platform data structure
+ * @quirks: Flags used to identify the platform
+ */
+struct qspi_platform_data {
+ u32 quirks;
+};
+
+/**
* struct zynqmp_qspi - Defines qspi driver instance
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
@@ -152,8 +187,14 @@ static const struct zynqmp_eemi_ops *eemi_ops;
* @genfifobus: Used to select the upper or lower bus
* @dma_rx_bytes: Remaining bytes to receive by DMA mode
* @dma_addr: DMA address after mapping the kernel buffer
+ * @tx_bus_width: Used to represent number of data wires for tx
+ * @rx_bus_width: Used to represent number of data wires for rx
* @genfifoentry: Used for storing the genfifoentry instruction.
+ * @isinstr: Set when the current transfer is an instruction
 * @mode: Defines the mode in which QSPI is operating
+ * @speed_hz: Current SPI bus clock speed in Hz
+ * @io_mode: Defines the operating mode, either IO or DMA
+ * @has_tapdelay: Set when the tap delay registers are available in the QSPI
*/
struct zynqmp_qspi {
void __iomem *regs;
@@ -169,14 +210,22 @@ struct zynqmp_qspi {
u32 genfifobus;
u32 dma_rx_bytes;
dma_addr_t dma_addr;
+ u32 rx_bus_width;
+ u32 tx_bus_width;
u32 genfifoentry;
+ bool isinstr;
enum mode_type mode;
+ u32 speed_hz;
+ bool io_mode;
+ bool has_tapdelay;
};
/**
- * zynqmp_gqspi_read: For GQSPI controller read operation
+ * zynqmp_gqspi_read - For GQSPI controller read operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset from where to read
+ *
+ * Return: Value read from the qspi register
*/
static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
{
@@ -184,7 +233,7 @@ static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
}
/**
- * zynqmp_gqspi_write: For GQSPI controller write operation
+ * zynqmp_gqspi_write - For GQSPI controller write operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset where to write
* @val: Value to be written
@@ -196,10 +245,10 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
}
/**
- * zynqmp_gqspi_selectslave: For selection of slave device
+ * zynqmp_gqspi_selectslave - For selection of slave device
* @instanceptr: Pointer to the zynqmp_qspi structure
- * @flashcs: For chip select
- * @flashbus: To check which bus is selected- upper or lower
+ * @slavecs: For chip select
+ * @slavebus: To check which bus is selected- upper or lower
*/
static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
u8 slavecs, u8 slavebus)
@@ -243,7 +292,76 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
}
/**
- * zynqmp_qspi_init_hw: Initialize the hardware
+ * zynqmp_qspi_set_tapdelay - To configure qspi tap delays
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @baudrateval: Baud rate to configure
+ */
+static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
+{
+ u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
+ u32 reqhz = 0;
+
+ if (!eemi_ops->ioctl)
+ return;
+
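+	/*
+	 * Pick the tap-delay settings from the effective SCLK frequency:
+	 * <= 40 MHz needs only the bypass, <= 100 MHz adds the loopback
+	 * and data delays, and <= 150 MHz uses the loopback delay alone.
+	 */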
+ clk_rate = clk_get_rate(xqspi->refclk);
+ reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));
+
+ if (!xqspi->has_tapdelay) {
+ if (reqhz <= GQSPI_FREQ_40MHZ) {
+ eemi_ops->ioctl(NODE_QSPI, IOCTL_SET_TAPDELAY_BYPASS,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE,
+ NULL);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ eemi_ops->ioctl(NODE_QSPI, IOCTL_SET_TAPDELAY_BYPASS,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE,
+ NULL);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= ((GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT)
+ | (GQSPI_DATA_DLY_ADJ_VALUE <<
+ GQSPI_DATA_DLY_ADJ_SHIFT));
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK;
+ }
+ } else {
+ if (reqhz <= GQSPI_FREQ_40MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= ((GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT)
+ | (GQSPI_DATA_DLY_ADJ_VALUE <<
+ GQSPI_DATA_DLY_ADJ_SHIFT));
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK;
+ }
+ zynqmp_gqspi_write(xqspi,
+ IOU_TAPDLY_BYPASS_OFST, tapdlybypass);
+ }
+
+ zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST, lpbkdlyadj);
+ zynqmp_gqspi_write(xqspi, GQSPI_DATA_DLY_ADJ_OFST, datadlyadj);
+}
+
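+/**
+ * zynqmp_disable_intr - Disable the GQSPI interrupts
+ * @xqspi:	Pointer to the zynqmp_qspi structure
+ *
+ * Return: Value read from the interrupt mask register
+ */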
+static u32 zynqmp_disable_intr(struct zynqmp_qspi *xqspi)
+{
+ u32 value;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ value = zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+
+ return value;
+}
+
+/**
+ * zynqmp_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynqmp_qspi structure
*
* The default settings of the QSPI controller's configurable parameters on
@@ -267,9 +385,7 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
/* Select the GQSPI mode */
zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
/* Clear and disable interrupts */
- zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
- zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
- GQSPI_ISR_WR_TO_CLR_MASK);
+ zynqmp_disable_intr(xqspi);
/* Clear the DMA STS */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
zynqmp_gqspi_read(xqspi,
@@ -321,17 +437,18 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_selectslave(xqspi,
GQSPI_SELECT_FLASH_CS_LOWER,
GQSPI_SELECT_FLASH_BUS_LOWER);
- /* Initialize DMA */
- zynqmp_gqspi_write(xqspi,
+ if (!xqspi->io_mode) {
+ /* Initialize DMA */
+ zynqmp_gqspi_write(xqspi,
GQSPI_QSPIDMA_DST_CTRL_OFST,
GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
-
+ }
/* Enable the GQSPI */
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
}
/**
- * zynqmp_qspi_copy_read_data: Copy data to RX buffer
+ * zynqmp_qspi_copy_read_data - Copy data to RX buffer
* @xqspi: Pointer to the zynqmp_qspi structure
* @data: The variable where data is stored
* @size: Number of bytes to be copied from data to RX buffer
@@ -345,7 +462,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer.
+ * zynqmp_prepare_transfer_hardware - Prepares hardware for transfer.
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
@@ -362,7 +479,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
}
/**
- * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer
+ * zynqmp_unprepare_transfer_hardware - Relaxes hardware after transfer
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
@@ -379,7 +496,7 @@ static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
}
/**
- * zynqmp_qspi_chipselect: Select or deselect the chip select line
+ * zynqmp_qspi_chipselect - Select or deselect the chip select line
* @qspi: Pointer to the spi_device structure
* @is_high: Select(0) or deselect (1) the chip select line
*/
@@ -390,11 +507,27 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
u32 genfifoentry = 0x0, statusreg;
genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+
+ if (qspi->master->flags & SPI_MASTER_BOTH_CS) {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_BOTH,
+ GQSPI_SELECT_FLASH_BUS_BOTH);
+ } else if (qspi->master->flags & SPI_MASTER_U_PAGE) {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_UPPER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ } else {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ }
+
genfifoentry |= xqspi->genfifobus;
if (!is_high) {
genfifoentry |= xqspi->genfifocs;
genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
+ xqspi->isinstr = true;
} else {
genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
}
@@ -415,8 +548,7 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
(statusreg & GQSPI_ISR_TXEMPTY_MASK))
break;
- else
- cpu_relax();
+ cpu_relax();
} while (!time_after_eq(jiffies, timeout));
if (time_after_eq(jiffies, timeout))
@@ -424,7 +556,7 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
}
/**
- * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified
+ * zynqmp_qspi_setup_transfer - Configure QSPI controller for specified
* transfer
* @qspi: Pointer to the spi_device structure
* @transfer: Pointer to the spi_transfer structure which provides
@@ -457,33 +589,39 @@ static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
else
req_hz = qspi->max_speed_hz;
- /* Set the clock frequency */
- /* If req_hz == 0, default to lowest speed */
- clk_rate = clk_get_rate(xqspi->refclk);
+ if (xqspi->speed_hz != req_hz) {
+ /* Set the clock frequency */
+ /* If req_hz == 0, default to lowest speed */
+ clk_rate = clk_get_rate(xqspi->refclk);
- while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
- (clk_rate /
- (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
- baud_rate_val++;
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
+ baud_rate_val++;
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- /* Set the QSPI clock phase and clock polarity */
- config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) &
+ (~GQSPI_CFG_CLK_POL_MASK);
- if (qspi->mode & SPI_CPHA)
- config_reg |= GQSPI_CFG_CLK_PHA_MASK;
- if (qspi->mode & SPI_CPOL)
- config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= GQSPI_CFG_CLK_PHA_MASK;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->speed_hz = clk_rate / (GQSPI_BAUD_DIV_SHIFT <<
+ baud_rate_val);
+ zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
+ }
- config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
- config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
return 0;
}
/**
- * zynqmp_qspi_setup: Configure the QSPI controller
+ * zynqmp_qspi_setup - Configure the QSPI controller
* @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer,
@@ -499,7 +637,7 @@ static int zynqmp_qspi_setup(struct spi_device *qspi)
}
/**
- * zynqmp_qspi_filltxfifo: Fills the TX FIFO as long as there is room in
+ * zynqmp_qspi_filltxfifo - Fills the TX FIFO as long as there is room in
* the FIFO or the bytes required to be
* transmitted.
* @xqspi: Pointer to the zynqmp_qspi structure
@@ -525,7 +663,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
}
/**
- * zynqmp_qspi_readrxfifo: Fills the RX FIFO as long as there is room in
+ * zynqmp_qspi_readrxfifo - Fills the RX FIFO as long as there is room in
* the FIFO.
* @xqspi: Pointer to the zynqmp_qspi structure
* @size: Number of bytes to be copied from RX buffer to RX FIFO
@@ -538,7 +676,7 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
while ((count < size) && (xqspi->bytes_to_receive > 0)) {
if (xqspi->bytes_to_receive >= 4) {
(*(u32 *) xqspi->rxbuf) =
- zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
+ zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
xqspi->rxbuf += 4;
xqspi->bytes_to_receive -= 4;
count += 4;
@@ -553,7 +691,40 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
}
/**
- * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI
+ * zynqmp_qspi_preparedummy - Prepares the dummy entry
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @transfer: Pointer to the spi_transfer structure containing the
+ * transfer data
+ * @genfifoentry: Pointer to the GENFIFO entry which is updated with the
+ * dummy-cycle configuration
+ */
+static void zynqmp_qspi_preparedummy(struct zynqmp_qspi *xqspi,
+ struct spi_transfer *transfer,
+ u32 *genfifoentry)
+{
+ /* For dummy Tx and Rx are NULL */
+ *genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
+
+ /* SPI mode */
+ *genfifoentry &= ~GQSPI_GENFIFO_MODE_QUADSPI;
+ if (xqspi->rx_bus_width == GQSPI_RX_BUS_WIDTH_QUAD ||
+ xqspi->tx_bus_width == GQSPI_TX_BUS_WIDTH_QUAD)
+ *genfifoentry |= GQSPI_GENFIFO_MODE_QUADSPI;
+ else if (xqspi->rx_bus_width == GQSPI_RX_BUS_WIDTH_DUAL ||
+ xqspi->tx_bus_width == GQSPI_TX_BUS_WIDTH_DUAL)
+ *genfifoentry |= GQSPI_GENFIFO_MODE_DUALSPI;
+ else
+ *genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+
+ /* Immediate data */
+ *genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+
+ if (transfer->dummy)
+ *genfifoentry |= transfer->dummy;
+}
+
+/**
+ * zynqmp_process_dma_irq - Handler for DMA done interrupt of QSPI
* controller
* @xqspi: zynqmp_qspi instance pointer
*
@@ -601,7 +772,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
}
/**
- * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller
+ * zynqmp_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
@@ -639,23 +810,29 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
zynqmp_process_dma_irq(xqspi);
ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ } else if ((mask & GQSPI_IER_RXNEMPTY_MASK)) {
+ zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+ if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
ret = IRQ_HANDLED;
}
if ((xqspi->bytes_to_receive == 0) && (xqspi->bytes_to_transfer == 0)
&& ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
- zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ zynqmp_disable_intr(xqspi);
+ xqspi->isinstr = false;
spi_finalize_current_transfer(master);
ret = IRQ_HANDLED;
}
+
return ret;
}
/**
- * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4.
+ * zynqmp_qspi_selectspimode - Selects SPI mode - x1 or x2 or x4.
* @xqspi: xqspi is a pointer to the GQSPI instance
* @spimode: spimode - SPI or DUAL or QUAD.
* Return: Mask to set desired SPI mode in GENFIFO entry.
@@ -683,7 +860,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
}
/**
- * zynq_qspi_setuprxdma: This function sets up the RX DMA operation
+ * zynq_qspi_setuprxdma - This function sets up the RX DMA operation
* @xqspi: xqspi is a pointer to the GQSPI instance.
*/
static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
@@ -692,8 +869,9 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
- if ((xqspi->bytes_to_receive < 8) ||
- ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
+ if (((xqspi->bytes_to_receive < 8) || (xqspi->io_mode)) ||
+ ((dma_align & GQSPI_DMA_UNALIGN) != 0x0) ||
+ is_vmalloc_addr(xqspi->rxbuf)) {
/* Setting to IO mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
@@ -708,8 +886,10 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
rx_bytes, DMA_FROM_DEVICE);
- if (dma_mapping_error(xqspi->dev, addr))
+ if (dma_mapping_error(xqspi->dev, addr)) {
dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
+ return;
+ }
xqspi->dma_rx_bytes = rx_bytes;
xqspi->dma_addr = addr;
@@ -733,7 +913,7 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
}
/**
- * zynqmp_qspi_txrxsetup: This function checks the TX/RX buffers in
+ * zynqmp_qspi_txrxsetup - This function checks the TX/RX buffers in
* the transfer and sets up the GENFIFO entries,
* TX FIFO as required.
* @xqspi: xqspi is a pointer to the GQSPI instance.
@@ -755,7 +935,7 @@ static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
*genfifoentry |= GQSPI_GENFIFO_TX;
*genfifoentry |=
zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits);
- xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_transfer = transfer->len - (transfer->dummy/8);
if (xqspi->mode == GQSPI_MODE_DMA) {
config_reg = zynqmp_gqspi_read(xqspi,
GQSPI_CONFIG_OFST);
@@ -784,7 +964,7 @@ static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_qspi_start_transfer: Initiates the QSPI transfer
+ * zynqmp_qspi_start_transfer - Initiates the QSPI transfer
* @master: Pointer to the spi_master structure which provides
* information about the controller.
* @qspi: Pointer to the spi_device structure
@@ -811,18 +991,28 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
+ if (!xqspi->isinstr && (master->flags & SPI_MASTER_DATA_STRIPE)) {
+ if (transfer->stripe)
+ genfifoentry |= GQSPI_GENFIFO_STRIPE;
+ }
zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry);
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
- transfer_len = transfer->len;
+ transfer_len = transfer->len - (transfer->dummy/8);
xqspi->genfifoentry = genfifoentry;
if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
genfifoentry |= transfer_len;
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+ if (transfer->dummy || transfer->tx_nbits >= 1) {
+ zynqmp_qspi_preparedummy(xqspi, transfer,
+ &genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
} else {
int tempcount = transfer_len;
u32 exponent = 8; /* 2^8 = 256 */
@@ -849,6 +1039,12 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
if (imm_data != 0) {
genfifoentry &= ~GQSPI_GENFIFO_EXP;
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
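+			/*
+			 * Round the immediate data count up to a 32-bit
+			 * word boundary; if rounding would overflow the
+			 * 8-bit immediate field, saturate at 0xFF.
+			 */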
+ if (imm_data % 4 != 0) {
+ if (((imm_data + 4 - (imm_data % 4)) & 0xFF) == 0x00)
+ imm_data = 0xFF;
+ else
+ imm_data = imm_data + 4 - (imm_data % 4);
+ }
genfifoentry |= (u8) (imm_data & 0xFF);
zynqmp_gqspi_write(xqspi,
GQSPI_GEN_FIFO_OFST, genfifoentry);
@@ -869,7 +1065,6 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
if (xqspi->txbuf != NULL)
/* Enable interrupts for TX */
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
- GQSPI_IER_TXEMPTY_MASK |
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_TXNOT_FULL_MASK);
@@ -883,8 +1078,7 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
} else {
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_GENFIFOEMPTY_MASK |
- GQSPI_IER_RXNEMPTY_MASK |
- GQSPI_IER_RXEMPTY_MASK);
+ GQSPI_IER_RXNEMPTY_MASK);
}
}
@@ -892,8 +1086,8 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
}
/**
- * zynqmp_qspi_suspend: Suspend method for the QSPI driver
- * @_dev: Address of the platform_device structure
+ * zynqmp_qspi_suspend - Suspend method for the QSPI driver
+ * @dev: Address of the platform_device structure
*
* This function stops the QSPI driver queue and disables the QSPI controller
*
@@ -911,7 +1105,7 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
}
/**
- * zynqmp_qspi_resume: Resume method for the QSPI driver
+ * zynqmp_qspi_resume - Resume method for the QSPI driver
* @dev: Address of the platform_device structure
*
* The function starts the QSPI driver queue and initializes the QSPI
@@ -938,10 +1132,12 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
return ret;
}
+ zynqmp_qspi_init_hw(xqspi);
spi_master_resume(master);
clk_disable(xqspi->refclk);
clk_disable(xqspi->pclk);
+
return 0;
}
@@ -994,14 +1190,38 @@ static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
return 0;
}
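+
+/**
+ * zynqmp_runtime_idle - Runtime idle callback for the QSPI driver
+ * @dev:	Address of the platform_device structure
+ *
+ * Reports the controller as busy while the GQSPI enable register is set,
+ * which prevents runtime suspend during an active transfer.
+ *
+ * Return: 0 if the controller is idle; -EBUSY otherwise
+ */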
+static int __maybe_unused zynqmp_runtime_idle(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ u32 value;
+
+ value = zynqmp_gqspi_read(xqspi, GQSPI_EN_OFST);
+ if (value)
+ return -EBUSY;
+
+ return 0;
+}
+
static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
- zynqmp_runtime_resume, NULL)
+ zynqmp_runtime_resume, zynqmp_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
+static const struct qspi_platform_data versal_qspi_def = {
+ .quirks = QSPI_QUIRK_HAS_TAPDELAY,
+};
+
+static const struct of_device_id zynqmp_qspi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0"},
+ { .compatible = "xlnx,versal-qspi-1.0", .data = &versal_qspi_def },
+ { /* End of table */ }
+};
+
/**
- * zynqmp_qspi_probe: Probe method for the QSPI driver
+ * zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
@@ -1015,6 +1235,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct zynqmp_qspi *xqspi;
struct resource *res;
struct device *dev = &pdev->dev;
+ struct device_node *nc;
+ const struct of_device_id *match;
+ u32 num_cs;
+ u32 rx_bus_width;
+ u32 tx_bus_width;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
@@ -1028,6 +1253,14 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, master);
+ match = of_match_node(zynqmp_qspi_of_match, pdev->dev.of_node);
+ if (match) {
+ const struct qspi_platform_data *p_data = match->data;
+
+ if (p_data && (p_data->quirks & QSPI_QUIRK_HAS_TAPDELAY))
+ xqspi->has_tapdelay = true;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xqspi->regs)) {
@@ -1066,11 +1299,16 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+
+ if (of_property_read_bool(pdev->dev.of_node, "has-io-mode"))
+ xqspi->io_mode = true;
+
/* QSPI controller initializations */
zynqmp_qspi_init_hw(xqspi);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
+
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
@@ -1085,8 +1323,37 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
- master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ xqspi->rx_bus_width = GQSPI_RX_BUS_WIDTH_SINGLE;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ ret = of_property_read_u32(nc, "spi-rx-bus-width",
+ &rx_bus_width);
+		if (!ret) {
+			xqspi->rx_bus_width = rx_bus_width;
+			of_node_put(nc);
+			break;
+		}
+ }
+ if (ret)
+ dev_err(dev, "rx bus width not found\n");
+
+ xqspi->tx_bus_width = GQSPI_TX_BUS_WIDTH_SINGLE;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ ret = of_property_read_u32(nc, "spi-tx-bus-width",
+ &tx_bus_width);
+		if (!ret) {
+			xqspi->tx_bus_width = tx_bus_width;
+			of_node_put(nc);
+			break;
+		}
+ }
+ if (ret)
+ dev_err(dev, "tx bus width not found\n");
+
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
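+
+	/* The GQSPI DMA engine supports 44-bit addressing */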
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
master->setup = zynqmp_qspi_setup;
master->set_cs = zynqmp_qspi_chipselect;
master->transfer_one = zynqmp_qspi_start_transfer;
@@ -1097,6 +1364,8 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
+ xqspi->speed_hz = master->max_speed_hz;
+ master->auto_runtime_pm = true;
if (master->dev.parent == NULL)
master->dev.parent = &master->dev;
@@ -1120,7 +1389,7 @@ remove_master:
}
/**
- * zynqmp_qspi_remove: Remove method for the QSPI driver
+ * zynqmp_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
@@ -1145,11 +1414,6 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id zynqmp_qspi_of_match[] = {
- { .compatible = "xlnx,zynqmp-qspi-1.0", },
- { /* End of table */ }
-};
-
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
static struct platform_driver zynqmp_qspi_driver = {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index be5b4b65c016..bb2bb9bfe4fe 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -3056,6 +3056,14 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
}
}
+ /*
+	 * The data stripe option is valid only when both chip
+	 * selects are enabled
+ */
+ if ((ctlr->flags & SPI_MASTER_DATA_STRIPE)
+ && !(ctlr->flags & SPI_MASTER_BOTH_CS))
+ return -EINVAL;
+
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d5f771fafc21..3f5fb3816815 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -78,6 +78,8 @@ source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
+source "drivers/staging/apf/Kconfig"
+
source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
@@ -96,6 +98,8 @@ source "drivers/staging/vc04_services/Kconfig"
source "drivers/staging/pi433/Kconfig"
+source "drivers/staging/fclk/Kconfig"
+
source "drivers/staging/mt7621-pci/Kconfig"
source "drivers/staging/mt7621-pci-phy/Kconfig"
@@ -118,4 +122,14 @@ source "drivers/staging/fieldbus/Kconfig"
source "drivers/staging/kpc2000/Kconfig"
+source "drivers/staging/xlnx_ctrl_driver/Kconfig"
+
+source "drivers/staging/xlnx_ernic/Kconfig"
+
+source "drivers/staging/xroeframer/Kconfig"
+
+source "drivers/staging/xroetrafficgen/Kconfig"
+
+source "drivers/staging/xlnxsync/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 0da0d3f0b5e4..c78b68d11c28 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,7 +29,9 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
+obj-$(CONFIG_XILINX_APF) += apf/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+obj-$(CONFIG_XILINX_FCLK) += fclk/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_WILC1000) += wilc1000/
@@ -49,3 +51,7 @@ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
+obj-y += xlnx_ctrl_driver/
+obj-$(CONFIG_ERNIC) += xlnx_ernic/
+obj-$(CONFIG_XROE_FRAMER) += xroeframer/
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync/
diff --git a/drivers/staging/apf/Kconfig b/drivers/staging/apf/Kconfig
new file mode 100644
index 000000000000..63498f9fbd95
--- /dev/null
+++ b/drivers/staging/apf/Kconfig
@@ -0,0 +1,19 @@
+#
+# APF driver configuration
+#
+
+menuconfig XILINX_APF
+ tristate "Xilinx APF Accelerator driver"
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ default n
+ select UIO
+ select DMA_SHARED_BUFFER
+ help
+	  Select this option to include the APF accelerator driver.
+
+config XILINX_DMA_APF
+ bool "Xilinx APF DMA engines support"
+ depends on XILINX_APF
+ select DMA_ENGINE
+ help
+ Enable support for the Xilinx APF DMA controllers.
diff --git a/drivers/staging/apf/Makefile b/drivers/staging/apf/Makefile
new file mode 100644
index 000000000000..bf281a2c16df
--- /dev/null
+++ b/drivers/staging/apf/Makefile
@@ -0,0 +1,9 @@
+# Xilinx APF accelerator driver support
+
+ccflags-$(CONFIG_DEBUG_XILINX_APF) += -DDEBUG
+ccflags-$(CONFIG_XILINX_APF) += -Idrivers/dma
+
+obj-$(CONFIG_XILINX_APF) += xlnk.o
+obj-$(CONFIG_XILINX_APF) += xlnk-eng.o
+obj-$(CONFIG_XILINX_DMA_APF) += xilinx-dma-apf.o
+
diff --git a/drivers/staging/apf/dt-binding.txt b/drivers/staging/apf/dt-binding.txt
new file mode 100644
index 000000000000..fd73725fa589
--- /dev/null
+++ b/drivers/staging/apf/dt-binding.txt
@@ -0,0 +1,17 @@
+* Xilinx APF xlnk driver
+
+Required properties:
+- compatible: Should be "xlnx,xlnk-1.0"
+- clock-names: List of clock names
+- clocks: List of clock sources corresponding to the clock names
+
+The number of elements on the clock-names and clocks lists should be the same.
+If there are no controllable clocks, the xlnk node should be omitted from the
+devicetree.
+
+Example:
+ xlnk {
+ compatible = "xlnx,xlnk-1.0";
+ clock-names = "clk166", "clk150", "clk100", "clk200";
+ clocks = <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ };
diff --git a/drivers/staging/apf/xilinx-dma-apf.c b/drivers/staging/apf/xilinx-dma-apf.c
new file mode 100644
index 000000000000..55913130eafc
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.c
@@ -0,0 +1,1232 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver supports the Xilinx AXI DMA engine, which performs
+ * transfers between memory and a device. The engine can be configured
+ * with one or two channels; when configured with two channels, one
+ * transmits to the device and the other receives from it.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/dma-buf.h>
+
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+
+#include "xilinx-dma-apf.h"
+
+#include "xlnk.h"
+
+static DEFINE_MUTEX(dma_list_mutex);
+static LIST_HEAD(dma_device_list);
+/* IO accessors */
+#define DMA_OUT_64(addr, val) (writeq(val, addr))
+#define DMA_OUT(addr, val) (iowrite32(val, addr))
+#define DMA_IN(addr) (ioread32(addr))
+
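+/* Split a 64-bit DMA address into its low and high 32-bit words */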
+#define GET_LOW(x) ((u32)((x) & 0xFFFFFFFF))
+#define GET_HI(x) ((u32)((x) / 0x100000000))
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt);
+/* Driver functions */
+static void xdma_clean_bd(struct xdma_desc_hw *bd)
+{
+ bd->src_addr = 0x0;
+ bd->control = 0x0;
+ bd->status = 0x0;
+ bd->app[0] = 0x0;
+ bd->app[1] = 0x0;
+ bd->app[2] = 0x0;
+ bd->app[3] = 0x0;
+ bd->app[4] = 0x0;
+ bd->dmahead = 0x0;
+ bd->sw_flag = 0x0;
+}
+
+static int dma_is_running(struct xdma_chan *chan)
+{
+ return !(DMA_IN(&chan->regs->sr) & XDMA_SR_HALTED_MASK) &&
+ (DMA_IN(&chan->regs->cr) & XDMA_CR_RUNSTOP_MASK);
+}
+
+static int dma_is_idle(struct xdma_chan *chan)
+{
+ return DMA_IN(&chan->regs->sr) & XDMA_SR_IDLE_MASK;
+}
+
+static void dma_halt(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) & ~XDMA_CR_RUNSTOP_MASK));
+}
+
+static void dma_start(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RUNSTOP_MASK));
+}
+
+static int dma_init(struct xdma_chan *chan)
+{
+ int loop = XDMA_RESET_LOOP;
+
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RESET_MASK));
+
+ /* Wait for the hardware to finish reset
+ */
+ while (loop) {
+ if (!(DMA_IN(&chan->regs->cr) & XDMA_CR_RESET_MASK))
+ break;
+
+ loop -= 1;
+ }
+
+ if (!loop)
+ return 1;
+
+ return 0;
+}
+
+static int xdma_alloc_chan_descriptors(struct xdma_chan *chan)
+{
+ int i;
+ u8 *ptr;
+
+ /*
+ * The descriptors must be aligned to 64 bytes to meet the
+ * Xilinx DMA specification requirement.
+ */
+ ptr = (u8 *)dma_alloc_coherent(chan->dev,
+ (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT),
+ &chan->bd_phys_addr,
+ GFP_KERNEL);
+
+ if (!ptr) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ memset(ptr, 0, (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT));
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ chan->bd_chain_size = sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT;
+
+ /*
+ * Pre-allocate all the descriptors and chain them into a ring.
+ */
+ for (i = 0; i < XDMA_MAX_BD_CNT; i++) {
+ chan->bds[i] = (struct xdma_desc_hw *)
+ (ptr + (sizeof(struct xdma_desc_hw) * i));
+ chan->bds[i]->next_desc = chan->bd_phys_addr +
+ (sizeof(struct xdma_desc_hw) *
+ ((i + 1) % XDMA_MAX_BD_CNT));
+ }
+
+ /* there is at least one descriptor free to be allocated */
+ return 0;
+}
+
+static void xdma_free_chan_resources(struct xdma_chan *chan)
+{
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ dma_free_coherent(chan->dev, (sizeof(struct xdma_desc_hw) *
+ XDMA_MAX_BD_CNT), chan->bds[0], chan->bd_phys_addr);
+}
+
+static void xilinx_chan_desc_reinit(struct xdma_chan *chan)
+{
+ struct xdma_desc_hw *desc;
+ unsigned int start, end;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ start = 0;
+ end = XDMA_MAX_BD_CNT;
+
+ while (start < end) {
+ desc = chan->bds[start];
+ xdma_clean_bd(desc);
+ start++;
+ }
+ /* Re-initialize bd_cur and bd_tail values */
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xilinx_chan_desc_cleanup(struct xdma_chan *chan)
+{
+ struct xdma_head *dmahead;
+ struct xdma_desc_hw *desc;
+ struct completion *cmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+#define XDMA_BD_STS_RXEOF_MASK 0x04000000
+ desc = chan->bds[chan->bd_cur];
+ while (desc->status & XDMA_BD_STS_ALL_MASK) {
+ if ((desc->status & XDMA_BD_STS_RXEOF_MASK) &&
+ !(desc->dmahead)) {
+ pr_info("ERROR: premature EOF on DMA\n");
+ dma_init(chan); /* reset the dma HW */
+ while (!(desc->dmahead)) {
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ }
+ if (desc->dmahead) {
+ if ((desc->sw_flag & XDMA_BD_SF_POLL_MODE_MASK))
+ if (!(desc->sw_flag & XDMA_BD_SF_SW_DONE_MASK))
+ break;
+
+ dmahead = (struct xdma_head *)desc->dmahead;
+ cmp = (struct completion *)&dmahead->cmp;
+ if (dmahead->nappwords_o)
+ memcpy(dmahead->appwords_o, desc->app,
+ dmahead->nappwords_o * sizeof(u32));
+
+ if (chan->poll_mode)
+ cmp->done = 1;
+ else
+ complete(cmp);
+ }
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xdma_err_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ if (chan->err) {
+ /* Try a soft reset; if it fails, the channel is no
+ * longer functional and a hard (system) reset is needed
+ */
+ if (!dma_init(chan))
+ chan->err = 0;
+ else
+ dev_err(chan->dev, "DMA channel reset failed, please reset system\n");
+ }
+
+ /* Barrier to ensure the descriptor initialization has reached memory */
+ rmb();
+ xilinx_chan_desc_cleanup(chan);
+
+ xilinx_chan_desc_reinit(chan);
+}
+
+static void xdma_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ xilinx_chan_desc_cleanup(chan);
+}
+
+static void dump_cur_bd(struct xdma_chan *chan)
+{
+ u32 index;
+
+ index = (((u32)DMA_IN(&chan->regs->cdr)) - chan->bd_phys_addr) /
+ sizeof(struct xdma_desc_hw);
+
+ dev_err(chan->dev, "cur bd @ %08x\n", (u32)DMA_IN(&chan->regs->cdr));
+ dev_err(chan->dev, " buf = %p\n",
+ (void *)chan->bds[index]->src_addr);
+ dev_err(chan->dev, " ctrl = 0x%08x\n", chan->bds[index]->control);
+ dev_err(chan->dev, " sts = 0x%08x\n", chan->bds[index]->status);
+ dev_err(chan->dev, " next = %p\n",
+ (void *)chan->bds[index]->next_desc);
+}
+
+static irqreturn_t xdma_rx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xdma_tx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void xdma_start_transfer(struct xdma_chan *chan,
+ int start_index,
+ int end_index)
+{
+ xlnk_intptr_type cur_phys;
+ xlnk_intptr_type tail_phys;
+ u32 regval;
+
+ if (chan->err)
+ return;
+
+ cur_phys = chan->bd_phys_addr + (start_index *
+ sizeof(struct xdma_desc_hw));
+ tail_phys = chan->bd_phys_addr + (end_index *
+ sizeof(struct xdma_desc_hw));
+ /* If the hardware is already started, just update the tail pointer */
+ if (dma_is_running(chan) || dma_is_idle(chan)) {
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+ return;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->cdr, cur_phys);
+#else
+ DMA_OUT_64(&chan->regs->cdr, cur_phys);
+#endif
+
+ dma_start(chan);
+
+ /* Enable interrupts */
+ regval = DMA_IN(&chan->regs->cr);
+ regval |= (chan->poll_mode ? XDMA_XR_IRQ_ERROR_MASK
+ : XDMA_XR_IRQ_ALL_MASK);
+ DMA_OUT(&chan->regs->cr, regval);
+
+ /* Update tail ptr register and start the transfer */
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+}
+
+static int xdma_setup_hw_desc(struct xdma_chan *chan,
+ struct xdma_head *dmahead,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned int nappwords_i,
+ u32 *appwords_i)
+{
+ struct xdma_desc_hw *bd = NULL;
+ size_t copy;
+ struct scatterlist *sg;
+ size_t sg_used;
+ dma_addr_t dma_src;
+ int i, start_index = -1, end_index1 = 0, end_index2 = -1;
+ int status;
+ unsigned long flags;
+ unsigned int bd_used_saved;
+
+ if (!chan) {
+ pr_err("Requested transfer on invalid channel\n");
+ return -ENODEV;
+ }
+
+ /* if we have almost run out of BDs, try to recycle some */
+ if ((chan->poll_mode) && (chan->bd_used >= XDMA_BD_CLEANUP_THRESHOLD))
+ xilinx_chan_desc_cleanup(chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ bd_used_saved = chan->bd_used;
+ /*
+ * Build transactions using information in the scatter gather list
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ /* Allocate the link descriptor from DMA pool */
+ bd = chan->bds[chan->bd_tail];
+ if ((bd->control) & (XDMA_BD_STS_ACTUAL_LEN_MASK)) {
+ end_index2 = chan->bd_tail;
+ status = -ENOMEM;
+ /* If start_index was never set, we failed to
+ * allocate even the first descriptor, so there
+ * is nothing to clean up
+ */
+ if (start_index == -1)
+ goto out_unlock;
+ else
+ goto out_clean;
+ }
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the DMA controller limit
+ */
+ copy = min((size_t)(sg_dma_len(sg) - sg_used),
+ (size_t)chan->max_len);
+ /*
+ * Only the src address for DMA
+ */
+ dma_src = sg_dma_address(sg) + sg_used;
+ bd->src_addr = dma_src;
+
+ /* Fill in the descriptor */
+ bd->control = copy;
+
+ /*
+ * If this is not the first descriptor, chain the
+ * current descriptor after the previous descriptor
+ *
+ * For the first DMA_TO_DEVICE transfer, set SOP
+ */
+ if (start_index == -1) {
+ start_index = chan->bd_tail;
+
+ if (nappwords_i)
+ memcpy(bd->app, appwords_i,
+ nappwords_i * sizeof(u32));
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_SOP;
+ }
+
+ sg_used += copy;
+ end_index2 = chan->bd_tail;
+ chan->bd_tail++;
+ chan->bd_used++;
+ if (chan->bd_tail >= XDMA_MAX_BD_CNT) {
+ end_index1 = XDMA_MAX_BD_CNT;
+ chan->bd_tail = 0;
+ }
+ }
+ }
+
+ if (start_index == -1) {
+ status = -EINVAL;
+ goto out_unlock;
+ }
+
+ bd->dmahead = (xlnk_intptr_type)dmahead;
+ bd->sw_flag = chan->poll_mode ? XDMA_BD_SF_POLL_MODE_MASK : 0;
+ dmahead->last_bd_index = end_index2;
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_EOP;
+
+ /* Barrier to assert control word write commits */
+ wmb();
+
+ xdma_start_transfer(chan, start_index, end_index2);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+
+out_clean:
+ if (!end_index1) {
+ for (i = start_index; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ } else {
+ /* Clean to the end of the BD ring first, then from the
+ * start of the ring up to the second end index.
+ */
+ for (i = start_index; i < end_index1; i++)
+ xdma_clean_bd(chan->bds[i]);
+
+ for (i = 0; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ }
+ /* Move the bd_tail back */
+ chan->bd_tail = start_index;
+ chan->bd_used = bd_used_saved;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return status;
+}
+
+/*
+ * Create a minimal-length scatter-gather list for a physically contiguous
+ * buffer that starts at phy_buf and is phy_buf_len bytes long.
+ */
+static unsigned int phy_buf_to_sgl(xlnk_intptr_type phy_buf,
+ unsigned int phy_buf_len,
+ struct scatterlist *sgl)
+{
+ unsigned int sgl_cnt = 0;
+ struct scatterlist *sgl_head;
+ unsigned int dma_len;
+ unsigned int num_bd;
+
+ if (!phy_buf || !phy_buf_len) {
+ pr_err("phy_buf is NULL or phy_buf_len = 0\n");
+ return sgl_cnt;
+ }
+
+ num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
+ / XDMA_MAX_TRANS_LEN;
+ sgl_head = sgl;
+ sg_init_table(sgl, num_bd);
+
+ while (phy_buf_len > 0) {
+ xlnk_intptr_type page_id = phy_buf >> PAGE_SHIFT;
+ unsigned int offset = phy_buf - (page_id << PAGE_SHIFT);
+
+ sgl_cnt++;
+ if (sgl_cnt > XDMA_MAX_BD_CNT)
+ return 0;
+
+ dma_len = (phy_buf_len > XDMA_MAX_TRANS_LEN) ?
+ XDMA_MAX_TRANS_LEN : phy_buf_len;
+
+ sg_set_page(sgl_head, pfn_to_page(page_id), dma_len, offset);
+ sg_dma_address(sgl_head) = (dma_addr_t)phy_buf;
+ sg_dma_len(sgl_head) = dma_len;
+ sgl_head = sg_next(sgl_head);
+
+ phy_buf += dma_len;
+ phy_buf_len -= dma_len;
+ }
+
+ return sgl_cnt;
+}
+
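+/*
+ * Worked illustration of the chunking above (editor's sketch; the physical
+ * address is hypothetical):
+ *
+ *	struct scatterlist sgl[3];
+ *	unsigned int cnt;
+ *
+ *	cnt = phy_buf_to_sgl(0x20000000, 16 << 20, sgl);
+ *
+ * With XDMA_MAX_TRANS_LEN = 0x7FF000, a contiguous 16 MiB buffer yields
+ * cnt == 3: two 0x7FF000-byte entries plus one 0x2000-byte tail entry.
+ */
+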
+/* Merge the SG list sgl (of length sgl_len) into sgl_merged to save DMA BDs */
+static unsigned int sgl_merge(struct scatterlist *sgl,
+ unsigned int sgl_len,
+ struct scatterlist *sgl_merged)
+{
+ struct scatterlist *sghead, *sgend, *sgnext, *sg_merged_head;
+ unsigned int sg_visited_cnt = 0, sg_merged_num = 0;
+ unsigned int dma_len = 0;
+
+ sg_init_table(sgl_merged, sgl_len);
+ sg_merged_head = sgl_merged;
+ sghead = sgl;
+
+ while (sghead && (sg_visited_cnt < sgl_len)) {
+ dma_len = sg_dma_len(sghead);
+ sgend = sghead;
+ sg_visited_cnt++;
+ sgnext = sg_next(sgend);
+
+ while (sgnext && (sg_visited_cnt < sgl_len)) {
+ if ((sg_dma_address(sgend) + sg_dma_len(sgend)) !=
+ sg_dma_address(sgnext))
+ break;
+
+ if (dma_len + sg_dma_len(sgnext) >= XDMA_MAX_TRANS_LEN)
+ break;
+
+ sgend = sgnext;
+ dma_len += sg_dma_len(sgend);
+ sg_visited_cnt++;
+ sgnext = sg_next(sgnext);
+ }
+
+ sg_merged_num++;
+ if (sg_merged_num > XDMA_MAX_BD_CNT)
+ return 0;
+
+ memcpy(sg_merged_head, sghead, sizeof(struct scatterlist));
+
+ sg_dma_len(sg_merged_head) = dma_len;
+
+ sg_merged_head = sg_next(sg_merged_head);
+ sghead = sg_next(sgend);
+ }
+
+ return sg_merged_num;
+}
+
+static int pin_user_pages(xlnk_intptr_type uaddr,
+ unsigned int ulen,
+ int write,
+ struct scatterlist **scatterpp,
+ unsigned int *cntp,
+ unsigned int user_flags)
+{
+ int status;
+ struct mm_struct *mm = current->mm;
+ unsigned int first_page;
+ unsigned int last_page;
+ unsigned int num_pages;
+ struct scatterlist *sglist;
+ struct page **mapped_pages;
+
+ unsigned int pgidx;
+ unsigned int pglen;
+ unsigned int pgoff;
+ unsigned int sublen;
+
+ first_page = uaddr / PAGE_SIZE;
+ last_page = (uaddr + ulen - 1) / PAGE_SIZE;
+ num_pages = last_page - first_page + 1;
+ mapped_pages = vmalloc(sizeof(*mapped_pages) * num_pages);
+ if (!mapped_pages)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ status = get_user_pages(uaddr, num_pages,
+ (write ? FOLL_WRITE : 0) | FOLL_FORCE,
+ mapped_pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (status == num_pages) {
+ sglist = kcalloc(num_pages,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!sglist) {
+ pr_err("%s: kcalloc failed to create sg list\n",
+ __func__);
+ vfree(mapped_pages);
+ return -ENOMEM;
+ }
+ sg_init_table(sglist, num_pages);
+ sublen = 0;
+ for (pgidx = 0; pgidx < status; pgidx++) {
+ if (pgidx == 0 && num_pages != 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = PAGE_SIZE - pgoff;
+ } else if (pgidx == 0 && num_pages == 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = ulen;
+ } else if (pgidx == num_pages - 1) {
+ pgoff = 0;
+ pglen = ulen - sublen;
+ } else {
+ pgoff = 0;
+ pglen = PAGE_SIZE;
+ }
+
+ sublen += pglen;
+
+ sg_set_page(&sglist[pgidx],
+ mapped_pages[pgidx],
+ pglen, pgoff);
+
+ sg_dma_len(&sglist[pgidx]) = pglen;
+ }
+
+ *scatterpp = sglist;
+ *cntp = num_pages;
+
+ vfree(mapped_pages);
+ return 0;
+ }
+ pr_err("Failed to pin user pages\n");
+ for (pgidx = 0; pgidx < status; pgidx++)
+ put_page(mapped_pages[pgidx]);
+ vfree(mapped_pages);
+ return -ENOMEM;
+}
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt)
+{
+ struct page *pg;
+ unsigned int i;
+
+ if (!sglist)
+ return 0;
+
+ for (i = 0; i < cnt; i++) {
+ pg = sg_page(sglist + i);
+ if (pg)
+ put_page(pg);
+ }
+
+ kfree(sglist);
+ return 0;
+}
+
+struct xdma_chan *xdma_request_channel(char *name)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (!strcmp(device->chan[i]->name, name)) {
+ mutex_unlock(&dma_list_mutex);
+ return device->chan[i];
+ }
+ }
+ }
+ mutex_unlock(&dma_list_mutex);
+ return NULL;
+}
+EXPORT_SYMBOL(xdma_request_channel);
+
+void xdma_release_channel(struct xdma_chan *chan)
+{ }
+EXPORT_SYMBOL(xdma_release_channel);
+
+void xdma_release_all_channels(void)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (device->chan[i]->client_count) {
+ dma_halt(device->chan[i]);
+ xilinx_chan_desc_reinit(device->chan[i]);
+ pr_info("%s: chan %s freed\n",
+ __func__,
+ device->chan[i]->name);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(xdma_release_all_channels);
+
+static void xdma_release(struct device *dev)
+{
+}
+
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp)
+{
+ struct xdma_head *dmahead;
+ struct scatterlist *pagelist = NULL;
+ struct scatterlist *sglist = NULL;
+ unsigned int pagecnt = 0;
+ unsigned int sgcnt = 0;
+ enum dma_data_direction dmadir;
+ int status;
+ unsigned long attrs = 0;
+
+ dmahead = kzalloc(sizeof(*dmahead), GFP_KERNEL);
+ if (!dmahead)
+ return -ENOMEM;
+
+ dmahead->chan = chan;
+ dmahead->userbuf = userbuf;
+ dmahead->size = size;
+ dmahead->dmadir = chan->direction;
+ dmahead->userflag = user_flags;
+ dmahead->dmabuf = dp;
+ dmadir = chan->direction;
+
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (dp) {
+ int i;
+ struct scatterlist *sg;
+ unsigned int remaining_size = size;
+
+ if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
+ pr_err("%s dmabuf not mapped: %p\n",
+ __func__, dp->dbuf_sg_table);
+ return -EINVAL;
+ }
+ if (dp->dbuf_sg_table->nents == 0) {
+ pr_err("%s: cannot map a scatterlist with 0 entries\n",
+ __func__);
+ return -EINVAL;
+ }
+ sglist = kmalloc_array(dp->dbuf_sg_table->nents,
+ sizeof(*sglist),
+ GFP_KERNEL);
+ if (!sglist)
+ return -ENOMEM;
+
+ sg_init_table(sglist, dp->dbuf_sg_table->nents);
+ sgcnt = 0;
+ for_each_sg(dp->dbuf_sg_table->sgl,
+ sg,
+ dp->dbuf_sg_table->nents,
+ i) {
+ sg_set_page(sglist + i,
+ sg_page(sg),
+ sg_dma_len(sg),
+ sg->offset);
+ sg_dma_address(sglist + i) = sg_dma_address(sg);
+ if (remaining_size == 0) {
+ sg_dma_len(sglist + i) = 0;
+ } else if (sg_dma_len(sg) > remaining_size) {
+ sg_dma_len(sglist + i) = remaining_size;
+ remaining_size = 0;
+ sgcnt++;
+ } else {
+ sg_dma_len(sglist + i) = sg_dma_len(sg);
+ remaining_size -= sg_dma_len(sg);
+ sgcnt++;
+ }
+ }
+ dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;
+ pagelist = NULL;
+ pagecnt = 0;
+ } else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ size_t elem_cnt;
+
+ elem_cnt = DIV_ROUND_UP(size, XDMA_MAX_TRANS_LEN);
+ sglist = kmalloc_array(elem_cnt, sizeof(*sglist), GFP_KERNEL);
+ if (!sglist)
+ return -ENOMEM;
+ sgcnt = phy_buf_to_sgl(userbuf, size, sglist);
+ if (!sgcnt) {
+ kfree(sglist);
+ return -ENOMEM;
+ }
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+
+ if (!status) {
+ pr_err("sg contiguous mapping failed\n");
+ return -ENOMEM;
+ }
+ pagelist = NULL;
+ pagecnt = 0;
+ } else {
+ status = pin_user_pages(userbuf,
+ size,
+ dmadir != DMA_TO_DEVICE,
+ &pagelist,
+ &pagecnt,
+ user_flags);
+ if (status < 0) {
+ pr_err("pin_user_pages failed\n");
+ return status;
+ }
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("dma_map_sg failed\n");
+ unpin_user_pages(pagelist, pagecnt);
+ return -ENOMEM;
+ }
+
+ sglist = kmalloc_array(pagecnt, sizeof(*sglist), GFP_KERNEL);
+ if (sglist)
+ sgcnt = sgl_merge(pagelist, pagecnt, sglist);
+ if (!sgcnt) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ kfree(sglist);
+ return -ENOMEM;
+ }
+ }
+ dmahead->sglist = sglist;
+ dmahead->sgcnt = sgcnt;
+ dmahead->pagelist = pagelist;
+ dmahead->pagecnt = pagecnt;
+
+ /* skipping config */
+ init_completion(&dmahead->cmp);
+
+ if (nappwords_i > XDMA_MAX_APPWORDS)
+ nappwords_i = XDMA_MAX_APPWORDS;
+
+ if (nappwords_o > XDMA_MAX_APPWORDS)
+ nappwords_o = XDMA_MAX_APPWORDS;
+
+ dmahead->nappwords_o = nappwords_o;
+
+ status = xdma_setup_hw_desc(chan, dmahead, sglist, sgcnt,
+ dmadir, nappwords_i, appwords_i);
+ if (status) {
+ pr_err("setup hw desc failed\n");
+ if (dmahead->pagelist) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ } else if (!dp) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+ }
+ kfree(dmahead->sglist);
+ return -ENOMEM;
+ }
+
+ *dmaheadpp = dmahead;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_submit);
+
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags)
+{
+ struct xdma_chan *chan = dmahead->chan;
+ unsigned long attrs = 0;
+
+ if (chan->poll_mode) {
+ xilinx_chan_desc_cleanup(chan);
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ } else {
+ if (*operating_flags & XDMA_FLAGS_TRYWAIT) {
+ if (!try_wait_for_completion(&dmahead->cmp))
+ return 0;
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ } else {
+ wait_for_completion(&dmahead->cmp);
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ }
+ }
+
+ if (!dmahead->dmabuf) {
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ dmahead->sglist,
+ dmahead->sgcnt,
+ dmahead->dmadir,
+ attrs);
+ } else {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ dmahead->pagelist,
+ dmahead->pagecnt,
+ dmahead->dmadir,
+ attrs);
+ unpin_user_pages(dmahead->pagelist, dmahead->pagecnt);
+ }
+ }
+ kfree(dmahead->sglist);
+
+ return 0;
+}
+EXPORT_SYMBOL(xdma_wait);
+
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay)
+{
+ *irq_thresh = (DMA_IN(&chan->regs->cr) >> XDMA_COALESCE_SHIFT) & 0xff;
+ *irq_delay = (DMA_IN(&chan->regs->cr) >> XDMA_DELAY_SHIFT) & 0xff;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_getconfig);
+
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay)
+{
+ unsigned long val;
+
+ if (dma_is_running(chan))
+ return -EBUSY;
+
+ val = DMA_IN(&chan->regs->cr);
+ val &= ~((0xff << XDMA_COALESCE_SHIFT) |
+ (0xff << XDMA_DELAY_SHIFT));
+ val |= ((irq_thresh << XDMA_COALESCE_SHIFT) |
+ (irq_delay << XDMA_DELAY_SHIFT));
+
+ DMA_OUT(&chan->regs->cr, val);
+ return 0;
+}
+EXPORT_SYMBOL(xdma_setconfig);
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int xlate_irq(unsigned int hwirq)
+{
+ struct of_phandle_args irq_data;
+ unsigned int irq;
+
+ if (!gic_node)
+ gic_node = of_find_matching_node(NULL, gic_match);
+
+ if (WARN_ON(!gic_node))
+ return hwirq;
+
+ irq_data.np = gic_node;
+ irq_data.args_count = 3;
+ irq_data.args[0] = 0;
+#if XLNK_SYS_BIT_WIDTH == 32
+ irq_data.args[1] = hwirq - 32; /* GIC SPI offset */
+#else
+ irq_data.args[1] = hwirq;
+#endif
+ irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ irq = irq_create_of_mapping(&irq_data);
+ if (WARN_ON(!irq))
+ irq = hwirq;
+
+ pr_info("%s: hwirq %d, irq %d\n", __func__, hwirq, irq);
+
+ return irq;
+}
+
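+/*
+ * Editor's note: on 32-bit Zynq the caller passes the raw GIC hwirq, so a
+ * hardware interrupt 61 maps to SPI 29 and the lookup above is equivalent
+ * to the device-tree specifier:
+ *
+ *	interrupts = <0 29 4>;	/- 4 == IRQ_TYPE_LEVEL_HIGH -/
+ *
+ * On 64-bit platforms the SPI number is passed through unmodified.
+ */
+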
+/* Brute-force probing for xilinx DMA
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+ struct xdma_device *xdev;
+ struct resource *res;
+ int err, i, j;
+ struct xdma_chan *chan;
+ struct xdma_device_config *dma_config;
+ int dma_chan_dir;
+ int dma_chan_reg_offset;
+
+ pr_info("%s: probe dma %p, nres %d, id %d\n", __func__,
+ &pdev->dev, pdev->num_resources, pdev->id);
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(struct xdma_device), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+ xdev->dev = &pdev->dev;
+
+ /* Set this as configurable once HPC works */
+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
+ dma_set_mask(&pdev->dev, 0xFFFFFFFFFFFFFFFFull);
+
+ dma_config = (struct xdma_device_config *)xdev->dev->platform_data;
+ if (!dma_config ||
+ dma_config->channel_count < 1 || dma_config->channel_count > 2)
+ return -EINVAL;
+
+ /* Get the memory resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->regs)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xdev->regs);
+ }
+
+ dev_info(&pdev->dev, "AXIDMA device %d physical base address=%pa\n",
+ pdev->id, &res->start);
+ dev_info(&pdev->dev, "AXIDMA device %d remapped to %pa\n",
+ pdev->id, &xdev->regs);
+
+ /* Allocate the channels */
+
+ dev_info(&pdev->dev, "has %d channel(s)\n", dma_config->channel_count);
+ for (i = 0; i < dma_config->channel_count; i++) {
+ chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ dma_chan_dir = strcmp(dma_config->channel_config[i].type,
+ "axi-dma-mm2s-channel") ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_chan_reg_offset = (dma_chan_dir == DMA_TO_DEVICE) ?
+ 0 :
+ 0x30;
+
+ /* Initialize channel parameters */
+ chan->id = i;
+ chan->regs = xdev->regs + dma_chan_reg_offset;
+ /* chan->regs = xdev->regs; */
+ chan->dev = xdev->dev;
+ chan->max_len = XDMA_MAX_TRANS_LEN;
+ chan->direction = dma_chan_dir;
+ snprintf(chan->name, sizeof(chan->name), "%s:%d",
+ dma_config->name, chan->id);
+ pr_info(" chan %d name: %s\n", chan->id, chan->name);
+ pr_info(" chan %d direction: %s\n", chan->id,
+ dma_chan_dir == DMA_FROM_DEVICE ?
+ "FROM_DEVICE" : "TO_DEVICE");
+
+ spin_lock_init(&chan->lock);
+ tasklet_init(&chan->tasklet,
+ xdma_tasklet,
+ (unsigned long)chan);
+ tasklet_init(&chan->dma_err_tasklet,
+ xdma_err_tasklet,
+ (unsigned long)chan);
+
+ xdev->chan[chan->id] = chan;
+
+ /* The IRQ resource */
+ chan->irq = xlate_irq(dma_config->channel_config[i].irq);
+ if (chan->irq <= 0) {
+ pr_err("get_resource for IRQ for dev %d failed\n",
+ pdev->id);
+ return -ENODEV;
+ }
+
+ err = devm_request_irq(&pdev->dev,
+ chan->irq,
+ dma_chan_dir == DMA_TO_DEVICE ?
+ xdma_tx_intr_handler :
+ xdma_rx_intr_handler,
+ IRQF_SHARED,
+ pdev->name,
+ chan);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request IRQ\n");
+ return err;
+ }
+ pr_info(" chan%d irq: %d\n", chan->id, chan->irq);
+
+ chan->poll_mode = dma_config->channel_config[i].poll_mode;
+ pr_info(" chan%d poll mode: %s\n",
+ chan->id,
+ chan->poll_mode ? "on" : "off");
+
+ /* Allocate channel BD's */
+ err = xdma_alloc_chan_descriptors(xdev->chan[chan->id]);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate BD's\n");
+ return -ENOMEM;
+ }
+ pr_info(" chan%d bd ring @ 0x%p (size: 0x%x bytes)\n",
+ chan->id,
+ (void *)chan->bd_phys_addr,
+ chan->bd_chain_size);
+
+ err = dma_init(xdev->chan[chan->id]);
+ if (err) {
+ dev_err(&pdev->dev, "DMA init failed\n");
+ /* FIXME Check this - unregister all chan resources */
+ for (j = 0; j <= i; j++)
+ xdma_free_chan_resources(xdev->chan[j]);
+ return -EIO;
+ }
+ }
+ xdev->channel_count = dma_config->channel_count;
+ pdev->dev.release = xdma_release;
+ /* Add the DMA device to the global list */
+ mutex_lock(&dma_list_mutex);
+ list_add_tail(&xdev->node, &dma_device_list);
+ mutex_unlock(&dma_list_mutex);
+
+ platform_set_drvdata(pdev, xdev);
+
+ return 0;
+}
+
+static int xdma_remove(struct platform_device *pdev)
+{
+ int i;
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ /* Remove the DMA device from the global list */
+ mutex_lock(&dma_list_mutex);
+ list_del(&xdev->node);
+ mutex_unlock(&dma_list_mutex);
+
+ for (i = 0; i < XDMA_MAX_CHANS_PER_DEVICE; i++) {
+ if (xdev->chan[i])
+ xdma_free_chan_resources(xdev->chan[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver xdma_driver = {
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+ .driver = {
+ .name = "xilinx-axidma",
+ },
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("Xilinx DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xilinx-dma-apf.h b/drivers/staging/apf/xilinx-dma-apf.h
new file mode 100644
index 000000000000..8837fec01779
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.h
@@ -0,0 +1,234 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __XILINX_DMA_APF_H
+#define __XILINX_DMA_APF_H
+
+/* ioctls */
+#include <linux/ioctl.h>
+
+/* tasklet */
+#include <linux/interrupt.h>
+
+/* dma stuff */
+#include <linux/dma-mapping.h>
+
+/* xlnk structures */
+#include "xlnk.h"
+#include "xlnk-sysdef.h"
+
+#define XDMA_IOC_MAGIC 'X'
+#define XDMA_IOCRESET _IO(XDMA_IOC_MAGIC, 0)
+#define XDMA_IOCREQUEST _IOWR(XDMA_IOC_MAGIC, 1, unsigned long)
+#define XDMA_IOCRELEASE _IOWR(XDMA_IOC_MAGIC, 2, unsigned long)
+#define XDMA_IOCSUBMIT _IOWR(XDMA_IOC_MAGIC, 3, unsigned long)
+#define XDMA_IOCWAIT _IOWR(XDMA_IOC_MAGIC, 4, unsigned long)
+#define XDMA_IOCGETCONFIG _IOWR(XDMA_IOC_MAGIC, 5, unsigned long)
+#define XDMA_IOCSETCONFIG _IOWR(XDMA_IOC_MAGIC, 6, unsigned long)
+#define XDMA_IOC_MAXNR 6
+
+/* Specific hardware configuration-related constants
+ */
+#define XDMA_RESET_LOOP 1000000
+#define XDMA_HALT_LOOP 1000000
+#define XDMA_NO_CHANGE 0xFFFF
+
+/* General register bits definitions
+ */
+#define XDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
+
+#define XDMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
+#define XDMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
+
+#define XDMA_SR_ERR_INTERNAL_MASK 0x00000010 /* Datamover internal err */
+#define XDMA_SR_ERR_SLAVE_MASK 0x00000020 /* Datamover slave err */
+#define XDMA_SR_ERR_DECODE_MASK 0x00000040 /* Datamover decode err */
+#define XDMA_SR_ERR_SG_INT_MASK 0x00000100 /* SG internal err */
+#define XDMA_SR_ERR_SG_SLV_MASK 0x00000200 /* SG slave err */
+#define XDMA_SR_ERR_SG_DEC_MASK 0x00000400 /* SG decode err */
+#define XDMA_SR_ERR_ALL_MASK 0x00000770 /* All errors */
+
+#define XDMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
+#define XDMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XDMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XDMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+#define XDMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XDMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XDMA_DELAY_SHIFT 24
+#define XDMA_COALESCE_SHIFT 16
+
+#define XDMA_DELAY_MAX 0xFF /**< Maximum delay counter value */
+#define XDMA_COALESCE_MAX 0xFF /**< Maximum coalescing counter value */
+
+/* BD definitions for Axi DMA
+ */
+#define XDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF
+#define XDMA_BD_STS_COMPL_MASK 0x80000000
+#define XDMA_BD_STS_ERR_MASK 0x70000000
+#define XDMA_BD_STS_ALL_MASK 0xF0000000
+
+/* DMA BD special bits definitions
+ */
+#define XDMA_BD_SOP 0x08000000 /* Start of packet bit */
+#define XDMA_BD_EOP 0x04000000 /* End of packet bit */
+
+/* BD Software Flag definitions for Axi DMA
+ */
+#define XDMA_BD_SF_POLL_MODE_MASK 0x00000002
+#define XDMA_BD_SF_SW_DONE_MASK 0x00000001
+
+/* driver defines */
+#define XDMA_MAX_BD_CNT 16384
+#define XDMA_MAX_CHANS_PER_DEVICE 2
+#define XDMA_MAX_TRANS_LEN 0x7FF000
+#define XDMA_MAX_APPWORDS 5
+#define XDMA_BD_CLEANUP_THRESHOLD ((XDMA_MAX_BD_CNT * 8) / 10)
+
+#define XDMA_FLAGS_WAIT_COMPLETE 1
+#define XDMA_FLAGS_TRYWAIT 2
+
+/* Platform data definition, used until the configuration comes from the device tree */
+struct xdma_channel_config {
+ char *type;
+ unsigned int include_dre;
+ unsigned int datawidth;
+ unsigned int max_burst_len;
+ unsigned int irq;
+ unsigned int poll_mode;
+ unsigned int lite_mode;
+};
+
+struct xdma_device_config {
+ char *type;
+ char *name;
+ unsigned int include_sg;
+ unsigned int sg_include_stscntrl_strm; /* dma only */
+ unsigned int channel_count;
+ struct xdma_channel_config *channel_config;
+};
+
+struct xdma_desc_hw {
+ xlnk_intptr_type next_desc; /* 0x00 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad1; /* 0x04 */
+#endif
+ xlnk_intptr_type src_addr; /* 0x08 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad2; /* 0x0c */
+#endif
+ u32 addr_vsize; /* 0x10 */
+ u32 hsize; /* 0x14 */
+ u32 control; /* 0x18 */
+ u32 status; /* 0x1c */
+ u32 app[5]; /* 0x20 */
+ xlnk_intptr_type dmahead;
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 Reserved0;
+#endif
+ u32 sw_flag; /* 0x3C */
+} __aligned(64);
+
+/* shared by all Xilinx DMA engines */
+struct xdma_regs {
+ u32 cr; /* 0x00 Control Register */
+ u32 sr; /* 0x04 Status Register */
+ u32 cdr; /* 0x08 Current Descriptor Register */
+ u32 cdr_hi;
+ u32 tdr; /* 0x10 Tail Descriptor Register */
+ u32 tdr_hi;
+ u32 src; /* 0x18 Source Address Register (cdma) */
+ u32 src_hi;
+ u32 dst; /* 0x20 Destination Address Register (cdma) */
+ u32 dst_hi;
+ u32 btt_ref; /* 0x28 Bytes To Transfer (cdma) or
+ * park_ref (vdma)
+ */
+ u32 version; /* 0x2c version (vdma) */
+};
+
+/* Per DMA specific operations should be embedded in the channel structure */
+struct xdma_chan {
+ char name[64];
+ struct xdma_regs __iomem *regs;
+ struct device *dev; /* The dma device */
+ struct xdma_desc_hw *bds[XDMA_MAX_BD_CNT];
+ dma_addr_t bd_phys_addr;
+ u32 bd_chain_size;
+ int bd_cur;
+ int bd_tail;
+ unsigned int bd_used; /* # of BDs passed to hw chan */
+ enum dma_data_direction direction; /* Transfer direction */
+ int id; /* Channel ID */
+ int irq; /* Channel IRQ */
+ int poll_mode; /* Poll mode turned on? */
+ spinlock_t lock; /* Descriptor operation lock */
+ struct tasklet_struct tasklet; /* Cleanup work after irq */
+ struct tasklet_struct dma_err_tasklet; /* Cleanup work after irq */
+ int max_len; /* Maximum len per transfer */
+ int err; /* Channel has errors */
+ int client_count;
+};
+
+struct xdma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct list_head node;
+ struct xdma_chan *chan[XDMA_MAX_CHANS_PER_DEVICE];
+ u8 channel_count;
+};
+
+struct xdma_head {
+ xlnk_intptr_type userbuf;
+ unsigned int size;
+ unsigned int dmaflag;
+ enum dma_data_direction dmadir;
+ struct scatterlist *sglist;
+ unsigned int sgcnt;
+ struct scatterlist *pagelist;
+ unsigned int pagecnt;
+ struct completion cmp;
+ struct xdma_chan *chan;
+ unsigned int nappwords_o;
+ u32 appwords_o[XDMA_MAX_APPWORDS];
+ unsigned int userflag;
+ u32 last_bd_index;
+ struct xlnk_dmabuf_reg *dmabuf;
+};
+
+struct xdma_chan *xdma_request_channel(char *name);
+void xdma_release_channel(struct xdma_chan *chan);
+void xdma_release_all_channels(void);
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp);
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags);
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay);
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay);
+unsigned int xlate_irq(unsigned int hwirq);
+
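+/*
+ * Minimal in-kernel usage sketch of the API above (editor's illustration;
+ * the channel name, phys and len are hypothetical, and error handling is
+ * trimmed; CF_FLAG_* come from xlnk.h):
+ *
+ *	struct xdma_chan *chan;
+ *	struct xdma_head *head;
+ *	unsigned int op_flags = 0;
+ *	unsigned int flags = CF_FLAG_PHYSICALLY_CONTIGUOUS |
+ *			     CF_FLAG_CACHE_FLUSH_INVALIDATE;
+ *
+ *	chan = xdma_request_channel("mydma:0");
+ *	if (!chan)
+ *		return -ENODEV;
+ *	if (xdma_submit(chan, phys, NULL, len, 0, NULL, 0, flags,
+ *			&head, NULL))
+ *		return -EIO;
+ *	xdma_wait(head, flags, &op_flags);
+ *	kfree(head);
+ *	xdma_release_channel(chan);
+ */
+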
+#endif
diff --git a/drivers/staging/apf/xlnk-eng.c b/drivers/staging/apf/xlnk-eng.c
new file mode 100644
index 000000000000..bc40128e93cf
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.c
@@ -0,0 +1,242 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uio_driver.h>
+
+
+#include "xlnk-eng.h"
+
+static DEFINE_MUTEX(xlnk_eng_list_mutex);
+static LIST_HEAD(xlnk_eng_list);
+
+int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev)
+{
+ mutex_lock(&xlnk_eng_list_mutex);
+ /* todo: need to add more error checking */
+
+ list_add_tail(&xlnk_dev->global_node, &xlnk_eng_list);
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(xlnk_eng_register_device);
+
+
+void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev)
+{
+ mutex_lock(&xlnk_eng_list_mutex);
+ /* todo: need to add more error checking */
+
+ list_del(&xlnk_dev->global_node);
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+}
+EXPORT_SYMBOL(xlnk_eng_unregister_device);
+
+struct xlnk_eng_device *xlnk_eng_request_by_name(char *name)
+{
+ struct xlnk_eng_device *device, *_d;
+ int found = 0;
+
+ mutex_lock(&xlnk_eng_list_mutex);
+
+ list_for_each_entry_safe(device, _d, &xlnk_eng_list, global_node) {
+ if (!strcmp(dev_name(device->dev), name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ device = device->alloc(device);
+ else
+ device = NULL;
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+
+ return device;
+}
+EXPORT_SYMBOL(xlnk_eng_request_by_name);
+
+/**
+ * struct xilinx_xlnk_eng_device - device structure for xilinx_xlnk_eng
+ * @common: common device info
+ * @base: base address for device
+ * @lock: lock used by device
+ * @cnt: usage count
+ * @info: info for registering and unregistering uio device
+ */
+struct xilinx_xlnk_eng_device {
+ struct xlnk_eng_device common;
+ void __iomem *base;
+ spinlock_t lock;
+ int cnt;
+ struct uio_info *info;
+};
+
+static void xlnk_eng_release(struct device *dev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = dev_get_drvdata(dev);
+ if (!xdev)
+ return;
+
+ xdev->common.free(&xdev->common);
+}
+
+#define DRIVER_NAME "xilinx-xlnk-eng"
+
+#define to_xilinx_xlnk(dev) container_of(dev, \
+ struct xilinx_xlnk_eng_device, common)
+
+static struct xlnk_eng_device *xilinx_xlnk_alloc(
+ struct xlnk_eng_device *xlnkdev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+ struct xlnk_eng_device *retdev;
+
+ xdev = to_xilinx_xlnk(xlnkdev);
+
+ if (xdev->cnt == 0) {
+ xdev->cnt++;
+ retdev = xlnkdev;
+ } else {
+ retdev = NULL;
+ }
+
+ return retdev;
+}
+
+static void xilinx_xlnk_free(struct xlnk_eng_device *xlnkdev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = to_xilinx_xlnk(xlnkdev);
+
+ xdev->cnt = 0;
+}
+
+static int xlnk_eng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xilinx_xlnk_eng_device *xdev;
+ struct uio_info *info;
+ char *devname;
+
+ pr_info("xlnk_eng_probe ...\n");
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+
+ /* more error handling */
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+ xdev->info = info;
+ devname = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
+ if (!devname) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+ snprintf(devname, 64, "%s.%d", DRIVER_NAME, pdev->id);
+ pr_info("uio name %s\n", devname);
+ /* iomap registers */
+
+ /* Get the data from the platform device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->base)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xdev->base);
+ }
+
+ dev_info(&pdev->dev, "physical base : %pa\n", &res->start);
+ dev_info(&pdev->dev, "register range : 0x%llx\n",
+ (unsigned long long)resource_size(res));
+ dev_info(&pdev->dev, "base remapped to: %p\n", xdev->base);
+
+ info->mem[0].addr = res->start;
+ info->mem[0].size = resource_size(res);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+ info->mem[0].internal_addr = xdev->base;
+
+ /* info->name = DRIVER_NAME; */
+ info->name = devname;
+ info->version = "0.0.1";
+
+ info->irq = -1;
+
+ xdev->common.dev = &pdev->dev;
+
+ xdev->common.alloc = xilinx_xlnk_alloc;
+ xdev->common.free = xilinx_xlnk_free;
+ xdev->common.dev->release = xlnk_eng_release;
+
+ dev_set_drvdata(&pdev->dev, xdev);
+
+ spin_lock_init(&xdev->lock);
+
+ xdev->cnt = 0;
+
+ xlnk_eng_register_device(&xdev->common);
+
+ if (uio_register_device(&pdev->dev, info)) {
+ dev_err(&pdev->dev, "uio_register_device failed\n");
+ xlnk_eng_unregister_device(&xdev->common);
+ return -ENODEV;
+ }
+ dev_info(&pdev->dev, "xilinx-xlnk-eng uio registered\n");
+
+ return 0;
+}
+
+static int xlnk_eng_remove(struct platform_device *pdev)
+{
+ struct uio_info *info;
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = dev_get_drvdata(&pdev->dev);
+ info = xdev->info;
+
+ uio_unregister_device(info);
+ dev_info(&pdev->dev, "xilinx-xlnk-eng uio unregistered\n");
+ xlnk_eng_unregister_device(&xdev->common);
+
+ return 0;
+}
+
+static struct platform_driver xlnk_eng_driver = {
+ .probe = xlnk_eng_probe,
+ .remove = xlnk_eng_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(xlnk_eng_driver);
+
+MODULE_DESCRIPTION("Xilinx xlnk engine generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk-eng.h b/drivers/staging/apf/xlnk-eng.h
new file mode 100644
index 000000000000..9f9519664705
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.h
@@ -0,0 +1,33 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ *
+ */
+
+#ifndef XLNK_ENG_H
+#define XLNK_ENG_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+
+struct xlnk_eng_device {
+ struct list_head global_node;
+ struct xlnk_eng_device * (*alloc)(struct xlnk_eng_device *xdev);
+ void (*free)(struct xlnk_eng_device *xdev);
+ struct device *dev;
+};
+extern int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev);
+extern void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev);
+extern struct xlnk_eng_device *xlnk_eng_request_by_name(char *name);
+
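+/*
+ * Typical claim/release sequence (editor's sketch; the device name is
+ * hypothetical and depends on how the platform device was created):
+ *
+ *	struct xlnk_eng_device *eng;
+ *
+ *	eng = xlnk_eng_request_by_name("xilinx-xlnk-eng.0");
+ *	if (!eng)
+ *		return -EBUSY;	/- absent, or already claimed -/
+ *	/- ... program the engine through its UIO mapping ... -/
+ *	eng->free(eng);		/- drop the single-user claim -/
+ */
+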
+#endif
+
diff --git a/drivers/staging/apf/xlnk-ioctl.h b/drivers/staging/apf/xlnk-ioctl.h
new file mode 100644
index 000000000000..d909fa65459f
--- /dev/null
+++ b/drivers/staging/apf/xlnk-ioctl.h
@@ -0,0 +1,37 @@
+#ifndef _XLNK_IOCTL_H
+#define _XLNK_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#define XLNK_IOC_MAGIC 'X'
+
+#define XLNK_IOCRESET _IO(XLNK_IOC_MAGIC, 0)
+
+#define XLNK_IOCALLOCBUF _IOWR(XLNK_IOC_MAGIC, 2, unsigned long)
+#define XLNK_IOCFREEBUF _IOWR(XLNK_IOC_MAGIC, 3, unsigned long)
+#define XLNK_IOCADDDMABUF _IOWR(XLNK_IOC_MAGIC, 4, unsigned long)
+#define XLNK_IOCCLEARDMABUF _IOWR(XLNK_IOC_MAGIC, 5, unsigned long)
+
+#define XLNK_IOCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 7, unsigned long)
+#define XLNK_IOCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 8, unsigned long)
+#define XLNK_IOCDMAWAIT _IOWR(XLNK_IOC_MAGIC, 9, unsigned long)
+#define XLNK_IOCDMARELEASE _IOWR(XLNK_IOC_MAGIC, 10, unsigned long)
+
+#define XLNK_IOCMEMOP _IOWR(XLNK_IOC_MAGIC, 25, unsigned long)
+#define XLNK_IOCDEVREGISTER _IOWR(XLNK_IOC_MAGIC, 16, unsigned long)
+#define XLNK_IOCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 17, unsigned long)
+#define XLNK_IOCDEVUNREGISTER _IOWR(XLNK_IOC_MAGIC, 18, unsigned long)
+#define XLNK_IOCCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 19, unsigned long)
+#define XLNK_IOCCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 20, unsigned long)
+#define XLNK_IOCMCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 23, unsigned long)
+#define XLNK_IOCCACHECTRL _IOWR(XLNK_IOC_MAGIC, 24, unsigned long)
+
+#define XLNK_IOCIRQREGISTER _IOWR(XLNK_IOC_MAGIC, 35, unsigned long)
+#define XLNK_IOCIRQUNREGISTER _IOWR(XLNK_IOC_MAGIC, 36, unsigned long)
+#define XLNK_IOCIRQWAIT _IOWR(XLNK_IOC_MAGIC, 37, unsigned long)
+
+#define XLNK_IOCSHUTDOWN _IOWR(XLNK_IOC_MAGIC, 100, unsigned long)
+#define XLNK_IOCRECRES _IOWR(XLNK_IOC_MAGIC, 101, unsigned long)
+#define XLNK_IOC_MAXNR 101
+
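+/*
+ * Userspace entry-point sketch (editor's illustration). XLNK_IOCRESET is a
+ * plain _IO command and takes no argument block; the other commands take a
+ * driver-defined argument union declared in xlnk.h, not in this header:
+ *
+ *	int fd = open("/dev/xlnk", O_RDWR);
+ *
+ *	if (fd >= 0)
+ *		ioctl(fd, XLNK_IOCRESET);
+ */
+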
+#endif
diff --git a/drivers/staging/apf/xlnk-sysdef.h b/drivers/staging/apf/xlnk-sysdef.h
new file mode 100644
index 000000000000..b6334be3b9c4
--- /dev/null
+++ b/drivers/staging/apf/xlnk-sysdef.h
@@ -0,0 +1,34 @@
+#ifndef XLNK_SYSDEF_H
+#define XLNK_SYSDEF_H
+
+#if __SIZEOF_POINTER__ == 4
+ #define XLNK_SYS_BIT_WIDTH 32
+#elif __SIZEOF_POINTER__ == 8
+ #define XLNK_SYS_BIT_WIDTH 64
+#endif
+
+#include <linux/types.h>
+
+#if XLNK_SYS_BIT_WIDTH == 32
+
+ typedef u32 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#elif XLNK_SYS_BIT_WIDTH == 64
+
+ typedef u64 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#else
+ #error "Please define application bit width and system bit width"
+#endif
+
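+/*
+ * Editor's sketch of a compile-time sanity check for these typedefs
+ * (hypothetical helper, not used by the driver):
+ *
+ *	static inline void xlnk_sysdef_check(void)
+ *	{
+ *		BUILD_BUG_ON(sizeof(xlnk_intptr_type) != sizeof(void *));
+ *		BUILD_BUG_ON(XLNK_SYS_BIT_WIDTH != BITS_PER_LONG);
+ *	}
+ */
+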
+#endif
diff --git a/drivers/staging/apf/xlnk.c b/drivers/staging/apf/xlnk.c
new file mode 100644
index 000000000000..4701898cc5ec
--- /dev/null
+++ b/drivers/staging/apf/xlnk.c
@@ -0,0 +1,1580 @@
+/*
+ * xlnk.c
+ *
+ * Xilinx Accelerator driver support.
+ *
+ * Copyright (C) 2010 Xilinx Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/dma-buf.h>
+
+#include <linux/string.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/dmaengine.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h> /* error codes */
+#include <linux/dma-mapping.h> /* dma */
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/uio_driver.h>
+#include <linux/semaphore.h>
+
+#include "xlnk-ioctl.h"
+#include "xlnk-sysdef.h"
+#include "xlnk.h"
+
+#ifdef CONFIG_XILINX_DMA_APF
+#include "xilinx-dma-apf.h"
+#endif
+
+#define DRIVER_NAME "xlnk"
+#define DRIVER_VERSION "0.2"
+
+static struct platform_device *xlnk_pdev;
+static struct device *xlnk_dev;
+
+static struct cdev xlnk_cdev;
+
+static struct class *xlnk_class;
+
+static s32 driver_major;
+
+static char *driver_name = DRIVER_NAME;
+
+static void *xlnk_dev_buf;
+static ssize_t xlnk_dev_size;
+static int xlnk_dev_vmas;
+
+#define XLNK_BUF_POOL_SIZE 4096
+static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
+static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
+static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
+static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
+static int xlnk_buf_process[XLNK_BUF_POOL_SIZE];
+static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
+static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
+static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
+static spinlock_t xlnk_buf_lock;
+
+#define XLNK_IRQ_POOL_SIZE 256
+static struct xlnk_irq_control *xlnk_irq_set[XLNK_IRQ_POOL_SIZE];
+static spinlock_t xlnk_irq_lock;
+
+static int xlnk_open(struct inode *ip, struct file *filp);
+static int xlnk_release(struct inode *ip, struct file *filp);
+static long xlnk_ioctl(struct file *filp, unsigned int code,
+ unsigned long args);
+static ssize_t xlnk_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offp);
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp);
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
+static void xlnk_vma_open(struct vm_area_struct *vma);
+static void xlnk_vma_close(struct vm_area_struct *vma);
+
+static int xlnk_init_bufpool(void);
+static void xlnk_init_irqpool(void);
+
+LIST_HEAD(xlnk_dmabuf_list);
+
+static int xlnk_shutdown(unsigned long buf);
+static int xlnk_recover_resource(unsigned long buf);
+
+static const struct file_operations xlnk_fops = {
+ .open = xlnk_open,
+ .release = xlnk_release,
+ .read = xlnk_read,
+ .write = xlnk_write,
+ .unlocked_ioctl = xlnk_ioctl,
+ .mmap = xlnk_mmap,
+};
+
+#define MAX_XLNK_DMAS 128
+
+struct xlnk_device_pack {
+ char name[64];
+ struct platform_device pdev;
+ struct resource res[8];
+ struct uio_info *io_ptr;
+ int refs;
+
+#ifdef CONFIG_XILINX_DMA_APF
+ struct xdma_channel_config dma_chan_cfg[4]; /* for xidane dma only */
+ struct xdma_device_config dma_dev_cfg; /* for xidane dma only */
+#endif
+};
+
+static struct semaphore xlnk_devpack_sem;
+static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];
+static void xlnk_devpacks_init(void)
+{
+ unsigned int i;
+
+ sema_init(&xlnk_devpack_sem, 1);
+ for (i = 0; i < MAX_XLNK_DMAS; i++)
+ xlnk_devpacks[i] = NULL;
+}
+
+static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ if (!xlnk_devpacks[i]) {
+ struct xlnk_device_pack *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+ ret->pdev.id = i;
+ xlnk_devpacks[i] = ret;
+
+ return ret;
+ }
+ }
+
+ return NULL;
+}
+
+static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++)
+ if (xlnk_devpacks[i] == devpack)
+ xlnk_devpacks[i] = NULL;
+ /* callers own the final kfree(); freeing here would double-free */
+}
+
+static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ if (xlnk_devpacks[i] &&
+ xlnk_devpacks[i]->res[0].start == base)
+ return xlnk_devpacks[i];
+ }
+ return NULL;
+}
+
+static void xlnk_devpacks_free(xlnk_intptr_type base)
+{
+ struct xlnk_device_pack *devpack;
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ return;
+ }
+ devpack->refs--;
+ if (devpack->refs) {
+ up(&xlnk_devpack_sem);
+ return;
+ }
+ platform_device_unregister(&devpack->pdev);
+ xlnk_devpacks_delete(devpack);
+ kfree(devpack);
+ up(&xlnk_devpack_sem);
+}
+
+static void xlnk_devpacks_free_all(void)
+{
+ struct xlnk_device_pack *devpack;
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ devpack = xlnk_devpacks[i];
+ if (devpack) {
+ if (devpack->io_ptr) {
+ uio_unregister_device(devpack->io_ptr);
+ kfree(devpack->io_ptr);
+ } else {
+ platform_device_unregister(&devpack->pdev);
+ }
+ xlnk_devpacks_delete(devpack);
+ kfree(devpack);
+ }
+ }
+}
+
+static int xlnk_probe(struct platform_device *pdev)
+{
+ int err;
+ dev_t dev = 0;
+
+ xlnk_dev_buf = NULL;
+ xlnk_dev_size = 0;
+ xlnk_dev_vmas = 0;
+
+ /* use 2.6 device model */
+ err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "%s: Can't get major %d\n",
+ __func__, driver_major);
+ goto err1;
+ }
+
+ cdev_init(&xlnk_cdev, &xlnk_fops);
+
+ xlnk_cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&xlnk_cdev, dev, 1);
+
+ if (err) {
+ dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
+ __func__);
+ goto err3;
+ }
+
+ /* udev support */
+ xlnk_class = class_create(THIS_MODULE, "xlnk");
+ if (IS_ERR(xlnk_class)) {
+ dev_err(&pdev->dev, "%s: Error creating xlnk class\n",
+ __func__);
+ err = PTR_ERR(xlnk_class);
+ goto err3;
+ }
+
+ driver_major = MAJOR(dev);
+
+ dev_info(&pdev->dev, "Major %d\n", driver_major);
+
+ device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
+ NULL, "xlnk");
+
+ err = xlnk_init_bufpool();
+ if (err) {
+ dev_err(&pdev->dev, "%s: Failed to allocate buffer pool\n",
+ __func__);
+ goto err3;
+ }
+
+ xlnk_init_irqpool();
+
+ dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
+
+ xlnk_pdev = pdev;
+ xlnk_dev = &pdev->dev;
+
+ if (xlnk_pdev)
+ dev_info(&pdev->dev, "xlnk_pdev is not null\n");
+ else
+ dev_info(&pdev->dev, "xlnk_pdev is null\n");
+
+ xlnk_devpacks_init();
+
+ return 0;
+err3:
+ cdev_del(&xlnk_cdev);
+ unregister_chrdev_region(dev, 1);
+err1:
+ return err;
+}
+
+static int xlnk_buf_findnull(void)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (!xlnk_bufpool[i])
+ return i;
+ }
+
+ return 0;
+}
+
+static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (xlnk_bufpool[i] &&
+ xlnk_phyaddr[i] <= addr &&
+ xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
+ return i;
+ }
+
+ return 0;
+}
+
+static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (xlnk_bufpool[i] &&
+ xlnk_buf_process[i] == pid &&
+ xlnk_userbuf[i] <= addr &&
+ xlnk_userbuf[i] + xlnk_buflen[i] > addr)
+ return i;
+ }
+
+ return 0;
+}
+
+/*
+ * allocate and return an id
+ * the id must be a positive number
+ */
+static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
+{
+ int id;
+ void *kaddr;
+ dma_addr_t phys_addr_anchor;
+ unsigned long attrs;
+
+ attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+ kaddr = dma_alloc_attrs(xlnk_dev,
+ len,
+ &phys_addr_anchor,
+ GFP_KERNEL | GFP_DMA,
+ attrs);
+ if (!kaddr)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ id = xlnk_buf_findnull();
+ if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
+ xlnk_bufpool_alloc_point[id] = kaddr;
+ xlnk_bufpool[id] = kaddr;
+ xlnk_buflen[id] = len;
+ xlnk_bufcacheable[id] = cacheable;
+ xlnk_phyaddr[id] = phys_addr_anchor;
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
+ dma_free_attrs(xlnk_dev, len, kaddr, phys_addr_anchor, attrs);
+ return -ENOMEM;
+ }
+
+ return id;
+}
+
+static int xlnk_init_bufpool(void)
+{
+ unsigned int i;
+
+ spin_lock_init(&xlnk_buf_lock);
+ xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
+ if (!xlnk_dev_buf) {
+ pr_err("%s: malloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ *((char *)xlnk_dev_buf) = '\0';
+
+ xlnk_bufpool[0] = xlnk_dev_buf;
+ for (i = 1; i < xlnk_bufpool_size; i++)
+ xlnk_bufpool[i] = NULL;
+
+ return 0;
+}
+
+static void xlnk_init_irqpool(void)
+{
+ int i;
+
+ spin_lock_init(&xlnk_irq_lock);
+ for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++)
+ xlnk_irq_set[i] = NULL;
+}
+
+#define XLNK_SUSPEND NULL
+#define XLNK_RESUME NULL
+
+static int xlnk_remove(struct platform_device *pdev)
+{
+ dev_t devno;
+
+ kfree(xlnk_dev_buf);
+ xlnk_dev_buf = NULL;
+
+ devno = MKDEV(driver_major, 0);
+ cdev_del(&xlnk_cdev);
+ unregister_chrdev_region(devno, 1);
+ if (xlnk_class) {
+ /* remove the device from sysfs */
+ device_destroy(xlnk_class, MKDEV(driver_major, 0));
+ class_destroy(xlnk_class);
+ }
+
+ xlnk_devpacks_free_all();
+
+ return 0;
+}
+
+static const struct of_device_id xlnk_match[] = {
+ { .compatible = "xlnx,xlnk-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnk_match);
+
+static struct platform_driver xlnk_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnk_match,
+ },
+ .probe = xlnk_probe,
+ .remove = xlnk_remove,
+ .suspend = XLNK_SUSPEND,
+ .resume = XLNK_RESUME,
+};
+
+static u64 dma_mask = 0xFFFFFFFFFFFFFFFFull;
+
+/*
+ * This function is called when an application opens handle to the
+ * bridge driver.
+ */
+static int xlnk_open(struct inode *ip, struct file *filp)
+{
+ if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
+ xlnk_dev_size = 0;
+
+ return 0;
+}
+
+static ssize_t xlnk_read(struct file *filp,
+ char __user *buf,
+ size_t count,
+ loff_t *offp)
+{
+ ssize_t retval = 0;
+
+ if (*offp >= xlnk_dev_size)
+ goto out;
+
+ if (*offp + count > xlnk_dev_size)
+ count = xlnk_dev_size - *offp;
+
+ if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ *offp += count;
+ retval = count;
+
+ out:
+ return retval;
+}
+
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ ssize_t retval = 0;
+
+ /* xlnk_dev_buf is 8192 bytes; clamp the write to stay inside it */
+ if (*offp >= 8192)
+ return -ENOSPC;
+ if (*offp + count > 8192)
+ count = 8192 - *offp;
+
+ if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ *offp += count;
+ retval = count;
+
+ if (xlnk_dev_size < *offp)
+ xlnk_dev_size = *offp;
+
+ out:
+ return retval;
+}
+
+/*
+ * This function is called when an application closes handle to the bridge
+ * driver.
+ */
+static int xlnk_release(struct inode *ip, struct file *filp)
+{
+ return 0;
+}
+
+static int xlnk_devregister(char *name,
+ unsigned int id,
+ xlnk_intptr_type base,
+ unsigned int size,
+ unsigned int *irqs,
+ xlnk_intptr_type *handle)
+{
+ unsigned int nres;
+ unsigned int nirq;
+ unsigned int *irqptr;
+ struct xlnk_device_pack *devpack;
+ unsigned int i;
+ int status;
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (devpack) {
+ *handle = (xlnk_intptr_type)devpack;
+ devpack->refs++;
+ status = 0;
+ } else {
+ nirq = 0;
+ irqptr = irqs;
+
+ while (*irqptr) {
+ nirq++;
+ irqptr++;
+ }
+
+ if (nirq > 7) {
+ up(&xlnk_devpack_sem);
+ return -ENOMEM;
+ }
+
+ nres = nirq + 1;
+
+ devpack = xlnk_devpacks_alloc();
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ pr_err("Failed to allocate device %s\n", name);
+ return -ENOMEM;
+ }
+ devpack->io_ptr = NULL;
+ strscpy(devpack->name, name, sizeof(devpack->name));
+ devpack->pdev.name = devpack->name;
+
+ devpack->pdev.dev.dma_mask = &dma_mask;
+ devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+ devpack->res[0].start = base;
+ devpack->res[0].end = base + size - 1;
+ devpack->res[0].flags = IORESOURCE_MEM;
+
+ for (i = 0; i < nirq; i++) {
+ devpack->res[i + 1].start = irqs[i];
+ devpack->res[i + 1].end = irqs[i];
+ devpack->res[i + 1].flags = IORESOURCE_IRQ;
+ }
+
+ devpack->pdev.resource = devpack->res;
+ devpack->pdev.num_resources = nres;
+
+ status = platform_device_register(&devpack->pdev);
+ if (status) {
+ xlnk_devpacks_delete(devpack);
+ *handle = 0;
+ } else {
+ *handle = (xlnk_intptr_type)devpack;
+ }
+ }
+ up(&xlnk_devpack_sem);
+
+ return status;
+}
+
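+/*
+ * Describe a one- or two-channel AXI DMA engine and register it as a
+ * platform device so the xilinx-axidma driver can bind to it. As with
+ * xlnk_devregister(), a known base address only gains a reference.
+ */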
+static int xlnk_dmaregister(char *name,
+ unsigned int id,
+ xlnk_intptr_type base,
+ unsigned int size,
+ unsigned int chan_num,
+ unsigned int chan0_dir,
+ unsigned int chan0_irq,
+ unsigned int chan0_poll_mode,
+ unsigned int chan0_include_dre,
+ unsigned int chan0_data_width,
+ unsigned int chan1_dir,
+ unsigned int chan1_irq,
+ unsigned int chan1_poll_mode,
+ unsigned int chan1_include_dre,
+ unsigned int chan1_data_width,
+ xlnk_intptr_type *handle)
+{
+ int status = 0;
+
+#ifdef CONFIG_XILINX_DMA_APF
+
+ struct xlnk_device_pack *devpack;
+
+ if (chan_num < 1 || chan_num > 2) {
+ pr_err("%s: Expected either 1 or 2 channels, got %d\n",
+ __func__, chan_num);
+ return -EINVAL;
+ }
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (devpack) {
+ *handle = (xlnk_intptr_type)devpack;
+ devpack->refs++;
+ status = 0;
+ } else {
+ devpack = xlnk_devpacks_alloc();
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ return -ENOMEM;
+ }
+ strscpy(devpack->name, name, sizeof(devpack->name));
+ devpack->pdev.name = "xilinx-axidma";
+
+ devpack->io_ptr = NULL;
+
+ devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
+ devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
+ devpack->dma_chan_cfg[0].irq = chan0_irq;
+ devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
+ devpack->dma_chan_cfg[0].type =
+ (chan0_dir == XLNK_DMA_FROM_DEVICE) ?
+ "axi-dma-s2mm-channel" :
+ "axi-dma-mm2s-channel";
+
+ if (chan_num > 1) {
+ devpack->dma_chan_cfg[1].include_dre =
+ chan1_include_dre;
+ devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
+ devpack->dma_chan_cfg[1].irq = chan1_irq;
+ devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
+ devpack->dma_chan_cfg[1].type =
+ (chan1_dir == XLNK_DMA_FROM_DEVICE) ?
+ "axi-dma-s2mm-channel" :
+ "axi-dma-mm2s-channel";
+ }
+
+ devpack->dma_dev_cfg.name = devpack->name;
+ devpack->dma_dev_cfg.type = "axi-dma";
+ devpack->dma_dev_cfg.include_sg = 1;
+ devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
+ devpack->dma_dev_cfg.channel_count = chan_num;
+ devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];
+
+ devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
+
+ devpack->pdev.dev.dma_mask = &dma_mask;
+ devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+ devpack->res[0].start = base;
+ devpack->res[0].end = base + size - 1;
+ devpack->res[0].flags = IORESOURCE_MEM;
+
+ devpack->pdev.resource = devpack->res;
+ devpack->pdev.num_resources = 1;
+ status = platform_device_register(&devpack->pdev);
+ if (status) {
+ xlnk_devpacks_delete(devpack);
+ *handle = 0;
+ } else {
+ *handle = (xlnk_intptr_type)devpack;
+ }
+ }
+ up(&xlnk_devpack_sem);
+
+#endif
+ return status;
+}
+
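+/*
+ * ioctl backend: allocate a DMA buffer and return its pool id and
+ * physical address to userspace.
+ */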
+static int xlnk_allocbuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_int_type id;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ id = xlnk_allocbuf(temp_args.allocbuf.len,
+ temp_args.allocbuf.cacheable);
+
+ if (id <= 0)
+ return -ENOMEM;
+
+ temp_args.allocbuf.id = id;
+ temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xlnk_freebuf(int id)
+{
+ void *alloc_point;
+ dma_addr_t p_addr;
+ size_t buf_len;
+ int cacheable;
+ unsigned long attrs;
+
+ if (id <= 0 || id >= xlnk_bufpool_size)
+ return -ENOMEM;
+
+ if (!xlnk_bufpool[id])
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ alloc_point = xlnk_bufpool_alloc_point[id];
+ p_addr = xlnk_phyaddr[id];
+ buf_len = xlnk_buflen[id];
+ xlnk_bufpool[id] = NULL;
+ xlnk_phyaddr[id] = (dma_addr_t)NULL;
+ xlnk_buflen[id] = 0;
+ cacheable = xlnk_bufcacheable[id];
+ xlnk_bufcacheable[id] = 0;
+ spin_unlock(&xlnk_buf_lock);
+
+ attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+ dma_free_attrs(xlnk_dev,
+ buf_len,
+ alloc_point,
+ p_addr,
+ attrs);
+
+ return 0;
+}
+
+static void xlnk_free_all_buf(void)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++)
+ xlnk_freebuf(i);
+}
+
+static int xlnk_freebuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int id;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ id = temp_args.freebuf.id;
+ return xlnk_freebuf(id);
+}
+
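+/*
+ * Import a userspace DMA-BUF: attach it to this device, map it, and
+ * track it on xlnk_dmabuf_list keyed by the user virtual address.
+ */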
+static int xlnk_adddmabuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ struct xlnk_dmabuf_reg *db;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ list_for_each_entry(db, &xlnk_dmabuf_list, list) {
+ if (db->user_vaddr == temp_args.dmabuf.user_addr) {
+ pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
+ (unsigned long long)temp_args.dmabuf.user_addr);
+ spin_unlock(&xlnk_buf_lock);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+
+ db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
+ db->user_vaddr = temp_args.dmabuf.user_addr;
+ db->dbuf = dma_buf_get(db->dmabuf_fd);
+ if (IS_ERR(db->dbuf)) {
+ pr_err("Failed DMA-BUF get\n");
+ kfree(db);
+ return -EINVAL;
+ }
+
+ db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
+ if (IS_ERR(db->dbuf_attach)) {
+ dma_buf_put(db->dbuf);
+ pr_err("Failed DMA-BUF attach\n");
+ kfree(db);
+ return -EINVAL;
+ }
+
+ db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(db->dbuf_sg_table)) {
+ pr_err("Failed DMA-BUF map_attachment\n");
+ dma_buf_detach(db->dbuf, db->dbuf_attach);
+ dma_buf_put(db->dbuf);
+ kfree(db);
+ return -EINVAL;
+ }
+
+ spin_lock(&xlnk_buf_lock);
+ INIT_LIST_HEAD(&db->list);
+ list_add_tail(&db->list, &xlnk_dmabuf_list);
+ spin_unlock(&xlnk_buf_lock);
+
+ return 0;
+}
+
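+/* Undo xlnk_adddmabuf_ioctl(): unmap, detach and drop the DMA-BUF. */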
+static int xlnk_cleardmabuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ struct xlnk_dmabuf_reg *dp, *dp_temp;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
+ dma_buf_unmap_attachment(dp->dbuf_attach,
+ dp->dbuf_sg_table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(dp->dbuf, dp->dbuf_attach);
+ dma_buf_put(dp->dbuf);
+ list_del(&dp->list);
+ spin_unlock(&xlnk_buf_lock);
+ kfree(dp);
+ return 0;
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+ pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
+ (unsigned long long)temp_args.dmabuf.user_addr);
+
+ return -EINVAL;
+}
+
+static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ int status;
+ struct xdma_chan *chan;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ if (!temp_args.dmarequest.name[0])
+ return 0;
+
+ down(&xlnk_devpack_sem);
+ chan = xdma_request_channel(temp_args.dmarequest.name);
+ up(&xlnk_devpack_sem);
+ if (!chan)
+ return -ENOMEM;
+ temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
+ temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
+ temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+
+ return 0;
+#else
+ return -ENOSYS;
+#endif
+}
+
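+/*
+ * Queue a transfer on a previously requested channel. The buffer may be
+ * a pool allocation (looked up by physical address) or an imported
+ * DMA-BUF (looked up by user virtual address).
+ */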
+static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ struct xdma_head *dmahead;
+ struct xlnk_dmabuf_reg *dp, *cp = NULL;
+ int buf_id;
+ void *kaddr = NULL;
+ int status = -1;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ if (!temp_args.dmasubmit.dmachan)
+ return -ENODEV;
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
+ if (buf_id) {
+ xlnk_intptr_type addr_delta =
+ temp_args.dmasubmit.buf -
+ xlnk_phyaddr[buf_id];
+ kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
+ } else {
+ list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == temp_args.dmasubmit.buf) {
+ cp = dp;
+ break;
+ }
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ status = xdma_submit((struct xdma_chan *)
+ (temp_args.dmasubmit.dmachan),
+ temp_args.dmasubmit.buf,
+ kaddr,
+ temp_args.dmasubmit.len,
+ temp_args.dmasubmit.nappwords_i,
+ temp_args.dmasubmit.appwords_i,
+ temp_args.dmasubmit.nappwords_o,
+ temp_args.dmasubmit.flag,
+ &dmahead,
+ cp);
+
+ if (!status) {
+ temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
+ temp_args.dmasubmit.last_bd_index =
+ (xlnk_intptr_type)dmahead->last_bd_index;
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+ }
+ return status;
+#endif
+ return -ENOMEM;
+}
+
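+/*
+ * Block (or poll, depending on the submit-time flags) until a transfer
+ * completes, returning any output appwords to userspace.
+ */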
+static int xlnk_dmawait_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ struct xdma_head *dmahead;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
+ status = xdma_wait(dmahead,
+ dmahead->userflag,
+ &temp_args.dmawait.flags);
+ if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
+ if (temp_args.dmawait.nappwords) {
+ memcpy(temp_args.dmawait.appwords,
+ dmahead->appwords_o,
+ dmahead->nappwords_o * sizeof(u32));
+ }
+ kfree(dmahead);
+ }
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+#endif
+
+ return status;
+}
+
+static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+ down(&xlnk_devpack_sem);
+ xdma_release_channel((struct xdma_chan *)
+ (temp_args.dmarelease.dmachan));
+ up(&xlnk_devpack_sem);
+#endif
+
+ return status;
+}
+
+static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_intptr_type handle;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ status = xlnk_devregister(temp_args.devregister.name,
+ temp_args.devregister.id,
+ temp_args.devregister.base,
+ temp_args.devregister.size,
+ temp_args.devregister.irqs,
+ &handle);
+
+ return status;
+}
+
+static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_intptr_type handle;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ status = xlnk_dmaregister(temp_args.dmaregister.name,
+ temp_args.dmaregister.id,
+ temp_args.dmaregister.base,
+ temp_args.dmaregister.size,
+ temp_args.dmaregister.chan_num,
+ temp_args.dmaregister.chan0_dir,
+ temp_args.dmaregister.chan0_irq,
+ temp_args.dmaregister.chan0_poll_mode,
+ temp_args.dmaregister.chan0_include_dre,
+ temp_args.dmaregister.chan0_data_width,
+ temp_args.dmaregister.chan1_dir,
+ temp_args.dmaregister.chan1_irq,
+ temp_args.dmaregister.chan1_poll_mode,
+ temp_args.dmaregister.chan1_include_dre,
+ temp_args.dmaregister.chan1_data_width,
+ &handle);
+
+ return status;
+}
+
+static int xlnk_devunregister_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ xlnk_devpacks_free(temp_args.devunregister.base);
+
+ return 0;
+}
+
+static irqreturn_t xlnk_accel_isr(int irq, void *arg)
+{
+ struct xlnk_irq_control *irq_control = (struct xlnk_irq_control *)arg;
+
+ disable_irq_nosync(irq);
+ complete(&irq_control->cmp);
+
+ return IRQ_HANDLED;
+}
+
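+/*
+ * Hook a shared, level-triggered accelerator interrupt into the IRQ pool
+ * and return its pool index. Registering a line that is already in the
+ * pool simply reuses the existing slot.
+ */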
+static int xlnk_irq_register_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int i;
+ struct xlnk_irq_control *ctrl;
+ int irq_id = -1;
+ int irq_entry_new = 0;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(temp_args.irqregister));
+ if (status)
+ return -ENOMEM;
+
+ if (temp_args.irqregister.type !=
+ (XLNK_IRQ_LEVEL | XLNK_IRQ_ACTIVE_HIGH)) {
+ dev_err(xlnk_dev, "Unsupported interrupt type %x\n",
+ temp_args.irqregister.type);
+ return -EINVAL;
+ }
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->irq = xlate_irq(temp_args.irqregister.irq);
+ ctrl->enabled = 0;
+ init_completion(&ctrl->cmp);
+
+ spin_lock(&xlnk_irq_lock);
+ for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++) {
+ if (!xlnk_irq_set[i] && irq_id == -1) {
+ irq_entry_new = 1;
+ irq_id = i;
+ xlnk_irq_set[i] = ctrl;
+ } else if (xlnk_irq_set[i] &&
+ xlnk_irq_set[i]->irq == ctrl->irq) {
+ irq_id = i;
+ break;
+ }
+ }
+ spin_unlock(&xlnk_irq_lock);
+
+ if (irq_id == -1) {
+ kfree(ctrl);
+ return -ENOMEM;
+ }
+
+ if (!irq_entry_new) {
+ kfree(ctrl);
+ } else {
+ status = request_irq(ctrl->irq,
+ xlnk_accel_isr,
+ IRQF_SHARED,
+ "xlnk",
+ ctrl);
+ if (status) {
+ xlnk_irq_set[irq_id] = NULL;
+ kfree(ctrl);
+ return -EINVAL;
+ }
+ disable_irq_nosync(ctrl->irq);
+ }
+
+ temp_args.irqregister.irq_id = irq_id;
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(temp_args.irqregister)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xlnk_irq_unregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int irq_id;
+ struct xlnk_irq_control *ctrl;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(union xlnk_args));
+ if (status)
+ return -ENOMEM;
+
+ irq_id = temp_args.irqunregister.irq_id;
+ if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+ return -EINVAL;
+
+ ctrl = xlnk_irq_set[irq_id];
+ if (!ctrl)
+ return -EINVAL;
+
+ xlnk_irq_set[irq_id] = NULL;
+
+ if (ctrl->enabled) {
+ disable_irq_nosync(ctrl->irq);
+ complete(&ctrl->cmp);
+ }
+ free_irq(ctrl->irq, ctrl);
+ kfree(ctrl);
+
+ return 0;
+}
+
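+/*
+ * Wait (or poll) for the next interrupt on a registered line. The ISR
+ * disables the line and signals the completion; the line is re-enabled
+ * here before waiting.
+ */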
+static int xlnk_irq_wait_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int irq_id;
+ struct xlnk_irq_control *ctrl;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(temp_args.irqwait));
+ if (status)
+ return -ENOMEM;
+
+ irq_id = temp_args.irqwait.irq_id;
+ if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+ return -EINVAL;
+
+ ctrl = xlnk_irq_set[irq_id];
+ if (!ctrl)
+ return -EINVAL;
+
+ if (!ctrl->enabled) {
+ ctrl->enabled = 1;
+ enable_irq(ctrl->irq);
+ }
+
+ if (temp_args.irqwait.polling) {
+ if (!try_wait_for_completion(&ctrl->cmp))
+ temp_args.irqwait.success = 0;
+ else
+ temp_args.irqwait.success = 1;
+ } else {
+ wait_for_completion(&ctrl->cmp);
+ temp_args.irqwait.success = 1;
+ }
+
+ if (temp_args.irqwait.success) {
+ reinit_completion(&ctrl->cmp);
+ ctrl->enabled = 0;
+ }
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(temp_args.irqwait)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status, size;
+ void *kaddr;
+ xlnk_intptr_type paddr;
+ int buf_id;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status) {
+ dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
+ status);
+ return -ENOMEM;
+ }
+
+ if (!(temp_args.cachecontrol.action == 0 ||
+ temp_args.cachecontrol.action == 1)) {
+ dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
+ temp_args.cachecontrol.action);
+ return -EINVAL;
+ }
+
+ size = temp_args.cachecontrol.size;
+ paddr = temp_args.cachecontrol.phys_addr;
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_phys_addr(paddr);
+ kaddr = xlnk_bufpool[buf_id];
+ spin_unlock(&xlnk_buf_lock);
+
+ if (buf_id == 0) {
+ pr_err("Illegal cachecontrol on non-sds_alloc memory");
+ return -EINVAL;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ __cpuc_flush_dcache_area(kaddr, size);
+ outer_flush_range(paddr, paddr + size);
+ if (temp_args.cachecontrol.action == 1)
+ outer_inv_range(paddr, paddr + size);
+#else
+ if (temp_args.cachecontrol.action == 1)
+ __dma_map_area(kaddr, size, DMA_FROM_DEVICE);
+ else
+ __dma_map_area(kaddr, size, DMA_TO_DEVICE);
+#endif
+ return 0;
+}
+
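+/*
+ * Acquire or release a DMA mapping for a user buffer. On acquire, the
+ * backing page is mapped and the bus address returned as a token; on
+ * release, the mapping is torn down using the token passed back in.
+ */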
+static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
+{
+ union xlnk_args args;
+ xlnk_intptr_type p_addr = 0;
+ int status = 0;
+ int buf_id;
+ struct xlnk_dmabuf_reg *cp = NULL;
+ int cacheable = 1;
+ enum dma_data_direction dmadir;
+ xlnk_intptr_type page_id;
+ unsigned int page_offset;
+ struct scatterlist sg;
+ unsigned long attrs = 0;
+
+ status = copy_from_user(&args,
+ (void __user *)arg_addr,
+ sizeof(union xlnk_args));
+
+ if (status) {
+ pr_err("Error in copy_from_user. status = %d\n", status);
+ return status;
+ }
+
+ if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
+ !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
+ pr_err("memop lacks acquire or release flag\n");
+ return -EINVAL;
+ }
+
+ if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE &&
+ args.memop.flags & XLNK_FLAG_MEM_RELEASE) {
+ pr_err("memop has both acquire and release defined\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
+ current->pid);
+ if (buf_id > 0) {
+ cacheable = xlnk_bufcacheable[buf_id];
+ p_addr = xlnk_phyaddr[buf_id] +
+ (args.memop.virt_addr - xlnk_userbuf[buf_id]);
+ } else {
+ struct xlnk_dmabuf_reg *dp;
+
+ list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == args.memop.virt_addr) {
+ cp = dp;
+ break;
+ }
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ if (buf_id <= 0 && !cp) {
+ pr_err("Error, buffer not found\n");
+ return -EINVAL;
+ }
+
+ dmadir = (enum dma_data_direction)args.memop.dir;
+
+ if (args.memop.flags & XLNK_FLAG_COHERENT || !cacheable)
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (buf_id > 0) {
+ page_id = p_addr >> PAGE_SHIFT;
+ page_offset = p_addr - (page_id << PAGE_SHIFT);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg,
+ pfn_to_page(page_id),
+ args.memop.size,
+ page_offset);
+ sg_dma_len(&sg) = args.memop.size;
+ }
+
+ if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
+ if (buf_id > 0) {
+ status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
+ &sg,
+ 1,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("Failed to map address\n");
+ return -EINVAL;
+ }
+ args.memop.phys_addr = (xlnk_intptr_type)
+ sg_dma_address(&sg);
+ args.memop.token = (xlnk_intptr_type)
+ sg_dma_address(&sg);
+ status = copy_to_user((void __user *)arg_addr,
+ &args,
+ sizeof(union xlnk_args));
+ if (status)
+ pr_err("Error in copy_to_user. status = %d\n",
+ status);
+ } else {
+ if (cp->dbuf_sg_table->nents != 1) {
+ pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs. DMABUF is not physically contiguous\n");
+ return -EINVAL;
+ }
+ args.memop.phys_addr = (xlnk_intptr_type)
+ sg_dma_address(cp->dbuf_sg_table->sgl);
+ args.memop.token = 0;
+ status = copy_to_user((void __user *)arg_addr,
+ &args,
+ sizeof(union xlnk_args));
+ if (status)
+ pr_err("Error in copy_to_user. status = %d\n",
+ status);
+ }
+ } else {
+ if (buf_id > 0) {
+ sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
+ get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
+ &sg,
+ 1,
+ dmadir,
+ attrs);
+ }
+ }
+
+ return status;
+}
+
+/* This function provides IO interface to the bridge driver. */
+static long xlnk_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(code) > XLNK_IOC_MAXNR)
+ return -ENOTTY;
+
+ /* command validated above; dispatch to the matching handler */
+ switch (code) {
+ case XLNK_IOCALLOCBUF:
+ return xlnk_allocbuf_ioctl(filp, code, args);
+ case XLNK_IOCFREEBUF:
+ return xlnk_freebuf_ioctl(filp, code, args);
+ case XLNK_IOCADDDMABUF:
+ return xlnk_adddmabuf_ioctl(filp, code, args);
+ case XLNK_IOCCLEARDMABUF:
+ return xlnk_cleardmabuf_ioctl(filp, code, args);
+ case XLNK_IOCDMAREQUEST:
+ return xlnk_dmarequest_ioctl(filp, code, args);
+ case XLNK_IOCDMASUBMIT:
+ return xlnk_dmasubmit_ioctl(filp, code, args);
+ case XLNK_IOCDMAWAIT:
+ return xlnk_dmawait_ioctl(filp, code, args);
+ case XLNK_IOCDMARELEASE:
+ return xlnk_dmarelease_ioctl(filp, code, args);
+ case XLNK_IOCDEVREGISTER:
+ return xlnk_devregister_ioctl(filp, code, args);
+ case XLNK_IOCDMAREGISTER:
+ return xlnk_dmaregister_ioctl(filp, code, args);
+ case XLNK_IOCDEVUNREGISTER:
+ return xlnk_devunregister_ioctl(filp, code, args);
+ case XLNK_IOCCACHECTRL:
+ return xlnk_cachecontrol_ioctl(filp, code, args);
+ case XLNK_IOCIRQREGISTER:
+ return xlnk_irq_register_ioctl(filp, code, args);
+ case XLNK_IOCIRQUNREGISTER:
+ return xlnk_irq_unregister_ioctl(filp, code, args);
+ case XLNK_IOCIRQWAIT:
+ return xlnk_irq_wait_ioctl(filp, code, args);
+ case XLNK_IOCSHUTDOWN:
+ return xlnk_shutdown(args);
+ case XLNK_IOCRECRES:
+ return xlnk_recover_resource(args);
+ case XLNK_IOCMEMOP:
+ return xlnk_memop_ioctl(filp, args);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct vm_operations_struct xlnk_vm_ops = {
+ .open = xlnk_vma_open,
+ .close = xlnk_vma_close,
+};
+
+/* This function maps kernel space memory to user space memory. */
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int bufid;
+ int status;
+
+ bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);
+ if (bufid < 0 || bufid >= xlnk_bufpool_size)
+ return -EINVAL;
+
+ if (bufid == 0) {
+ unsigned long paddr = virt_to_phys(xlnk_dev_buf);
+
+ status = remap_pfn_range(vma,
+ vma->vm_start,
+ paddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ } else {
+ if (xlnk_bufcacheable[bufid] == 0)
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ status = remap_pfn_range(vma, vma->vm_start,
+ xlnk_phyaddr[bufid]
+ >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ xlnk_userbuf[bufid] = vma->vm_start;
+ xlnk_buf_process[bufid] = current->pid;
+ }
+ if (status) {
+ pr_err("%s failed with code %d\n", __func__, status);
+ return status;
+ }
+
+ xlnk_vma_open(vma);
+ vma->vm_ops = &xlnk_vm_ops;
+ vma->vm_private_data = xlnk_bufpool[bufid];
+
+ return 0;
+}
+
+static void xlnk_vma_open(struct vm_area_struct *vma)
+{
+ xlnk_dev_vmas++;
+}
+
+static void xlnk_vma_close(struct vm_area_struct *vma)
+{
+ xlnk_dev_vmas--;
+}
+
+static int xlnk_shutdown(unsigned long buf)
+{
+ return 0;
+}
+
+static int xlnk_recover_resource(unsigned long buf)
+{
+ xlnk_free_all_buf();
+#ifdef CONFIG_XILINX_DMA_APF
+ xdma_release_all_channels();
+#endif
+ return 0;
+}
+
+module_platform_driver(xlnk_driver);
+
+MODULE_DESCRIPTION("Xilinx APF driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk.h b/drivers/staging/apf/xlnk.h
new file mode 100644
index 000000000000..cbc2334c2e82
--- /dev/null
+++ b/drivers/staging/apf/xlnk.h
@@ -0,0 +1,175 @@
+#ifndef _XLNK_OS_H
+#define _XLNK_OS_H
+
+#include <linux/stddef.h>
+#include <linux/dmaengine.h>
+#include "xilinx-dma-apf.h"
+#include "xlnk-sysdef.h"
+
+#define XLNK_FLAG_COHERENT 0x00000001
+#define XLNK_FLAG_KERNEL_BUFFER 0x00000002
+#define XLNK_FLAG_DMAPOLLING 0x00000004
+#define XLNK_FLAG_IOMMU_VALID 0x00000008
+#define XLNK_FLAG_PHYSICAL_ADDR 0x00000100
+#define XLNK_FLAG_VIRTUAL_ADDR 0x00000200
+#define XLNK_FLAG_MEM_ACQUIRE 0x00001000
+#define XLNK_FLAG_MEM_RELEASE 0x00002000
+#define CF_FLAG_CACHE_FLUSH_INVALIDATE 0x00000001
+#define CF_FLAG_PHYSICALLY_CONTIGUOUS 0x00000002
+#define CF_FLAG_DMAPOLLING 0x00000004
+#define XLNK_IRQ_LEVEL 0x00000001
+#define XLNK_IRQ_EDGE 0x00000002
+#define XLNK_IRQ_ACTIVE_HIGH 0x00000004
+#define XLNK_IRQ_ACTIVE_LOW 0x00000008
+#define XLNK_IRQ_RESET_REG_VALID 0x00000010
+
+enum xlnk_dma_direction {
+ XLNK_DMA_BI = 0,
+ XLNK_DMA_TO_DEVICE = 1,
+ XLNK_DMA_FROM_DEVICE = 2,
+ XLNK_DMA_NONE = 3,
+};
+
+struct xlnk_dma_transfer_handle {
+ dma_addr_t dma_addr;
+ unsigned long transfer_length;
+ void *kern_addr;
+ unsigned long user_addr;
+ enum dma_data_direction transfer_direction;
+ int sg_effective_length;
+ int flags;
+ struct dma_chan *channel;
+ dma_cookie_t dma_cookie;
+ struct dma_async_tx_descriptor *async_desc;
+ struct completion completion_handle;
+};
+
+struct xlnk_dmabuf_reg {
+ xlnk_int_type dmabuf_fd;
+ xlnk_intptr_type user_vaddr;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *dbuf_sg_table;
+ int is_mapped;
+ int dma_direction;
+ struct list_head list;
+};
+
+struct xlnk_irq_control {
+ int irq;
+ int enabled;
+ struct completion cmp;
+};
+
+/* CROSSES KERNEL-USER BOUNDARY */
+union xlnk_args {
+ struct __attribute__ ((__packed__)) {
+ xlnk_uint_type len;
+ xlnk_int_type id;
+ xlnk_intptr_type phyaddr;
+ xlnk_byte_type cacheable;
+ } allocbuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_uint_type id;
+ xlnk_intptr_type buf;
+ } freebuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type dmabuf_fd;
+ xlnk_intptr_type user_addr;
+ } dmabuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_char_type name[64];
+ xlnk_intptr_type dmachan;
+ xlnk_uint_type bd_space_phys_addr;
+ xlnk_uint_type bd_space_size;
+ } dmarequest;
+#define XLNK_MAX_APPWORDS 5
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmachan;
+ xlnk_intptr_type buf;
+ xlnk_intptr_type buf2;
+ xlnk_uint_type buf_offset;
+ xlnk_uint_type len;
+ xlnk_uint_type bufflag;
+ xlnk_intptr_type sglist;
+ xlnk_uint_type sgcnt;
+ xlnk_enum_type dmadir;
+ xlnk_uint_type nappwords_i;
+ xlnk_uint_type appwords_i[XLNK_MAX_APPWORDS];
+ xlnk_uint_type nappwords_o;
+ xlnk_uint_type flag;
+ xlnk_intptr_type dmahandle; /* return value */
+ xlnk_uint_type last_bd_index;
+ } dmasubmit;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmahandle;
+ xlnk_uint_type nappwords;
+ xlnk_uint_type appwords[XLNK_MAX_APPWORDS];
+ /* output appwords; at most XLNK_MAX_APPWORDS entries */
+ xlnk_uint_type flags;
+ } dmawait;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmachan;
+ } dmarelease;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type base;
+ xlnk_uint_type size;
+ xlnk_uint_type irqs[8];
+ xlnk_char_type name[32];
+ xlnk_uint_type id;
+ } devregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type base;
+ } devunregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_char_type name[32];
+ xlnk_uint_type id;
+ xlnk_intptr_type base;
+ xlnk_uint_type size;
+ xlnk_uint_type chan_num;
+ xlnk_uint_type chan0_dir;
+ xlnk_uint_type chan0_irq;
+ xlnk_uint_type chan0_poll_mode;
+ xlnk_uint_type chan0_include_dre;
+ xlnk_uint_type chan0_data_width;
+ xlnk_uint_type chan1_dir;
+ xlnk_uint_type chan1_irq;
+ xlnk_uint_type chan1_poll_mode;
+ xlnk_uint_type chan1_include_dre;
+ xlnk_uint_type chan1_data_width;
+ } dmaregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type phys_addr;
+ xlnk_uint_type size;
+ xlnk_int_type action;
+ } cachecontrol;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type virt_addr;
+ xlnk_int_type size;
+ xlnk_enum_type dir;
+ xlnk_int_type flags;
+ xlnk_intptr_type phys_addr;
+ xlnk_intptr_type token;
+ } memop;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq;
+ xlnk_int_type subirq;
+ xlnk_uint_type type;
+ xlnk_intptr_type control_base;
+ xlnk_intptr_type reset_reg_base;
+ xlnk_uint_type reset_offset;
+ xlnk_uint_type reset_valid_high;
+ xlnk_uint_type reset_valid_low;
+ xlnk_int_type irq_id;
+ } irqregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq_id;
+ } irqunregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq_id;
+ xlnk_int_type polling;
+ xlnk_int_type success;
+ } irqwait;
+};
+
+#endif
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 15b7a82f4b1e..eb329e0cdc84 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -26,19 +26,40 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_FRAC_SHIFT 16
+#define WZRD_CLKFBOUT_FRAC_MASK (0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_DIVCLK_DIVIDE_SHIFT 0
#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT 0
+#define WZRD_CLKOUT_DIVIDE_WIDTH 8
#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_FRAC_SHIFT 8
+#define WZRD_CLKOUT_FRAC_MASK 0x3ff
+
+#define WZRD_DR_MAX_INT_DIV_VALUE 255
+#define WZRD_DR_NUM_RETRIES 10000
+#define WZRD_DR_STATUS_REG_OFFSET 0x04
+#define WZRD_DR_LOCK_BIT_MASK 0x00000001
+#define WZRD_DR_INIT_REG_OFFSET 0x25C
+#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
+#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
+
+/* Get the mask from width */
+#define div_mask(width) ((1 << (width)) - 1)
+
+/* Extract divider instance from clock hardware instance */
+#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
enum clk_wzrd_int_clks {
wzrd_clk_mul,
wzrd_clk_mul_div,
+ wzrd_clk_mul_frac,
wzrd_clk_int_max
};
/**
- * struct clk_wzrd:
+ * struct clk_wzrd - Clock wizard private data structure
+ *
* @clk_data: Clock data
* @nb: Notifier block
* @base: Memory base
@@ -61,6 +82,29 @@ struct clk_wzrd {
bool suspended;
};
+/**
+ * struct clk_wzrd_divider - clock divider specific to clk_wzrd
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @base: base address of register containing the divider
+ * @offset: offset address of register containing the divider
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @flags: clk_wzrd divider flags
+ * @table: array of value/divider pairs, last entry should have div = 0
+ * @lock: register lock
+ */
+struct clk_wzrd_divider {
+ struct clk_hw hw;
+ void __iomem *base;
+ u16 offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock; /* divider lock */
+};
+
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
/* maximum frequencies for input/output clocks per speed grade */
@@ -70,6 +114,319 @@ static const unsigned long clk_wzrd_max_freq[] = {
1066000000UL
};
+/* spin lock variable for clk_wzrd */
+static DEFINE_SPINLOCK(clkwzrd_lock);
+
+static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+ unsigned int val;
+
+ val = readl(div_addr) >> divider->shift;
+ val &= div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
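+/*
+ * Program a new integer divisor through the wizard's dynamic
+ * reconfiguration port: write the divisor, wait for lock, trigger the
+ * reconfiguration and wait for lock again.
+ */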
+static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value;
+ unsigned long flags = 0;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ value = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ /* Cap the value to max */
+ if (value > WZRD_DR_MAX_INT_DIV_VALUE)
+ value = WZRD_DR_MAX_INT_DIV_VALUE;
+
+ /* Set divisor and clear phase offset */
+ writel(value, div_addr);
+ writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (!retries) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (!retries)
+ err = -ETIMEDOUT;
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 div;
+
+ /*
+ * Since we do not change the parent rate, round to the closest
+ * achievable integer divide of it.
+ */
+ div = DIV_ROUND_CLOSEST(*prate, rate);
+ div = clamp_t(u32, div, 1, WZRD_DR_MAX_INT_DIV_VALUE);
+
+ return *prate / div;
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops = {
+ .round_rate = clk_wzrd_round_rate,
+ .set_rate = clk_wzrd_dynamic_reconfig,
+ .recalc_rate = clk_wzrd_recalc_rate,
+};
+
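+/*
+ * Fractional readback: the register holds an integer divide plus a
+ * 10-bit fractional part expressed in thousandths.
+ */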
+static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned int val;
+ u32 div, frac;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ val = readl(div_addr);
+ div = val & div_mask(divider->width);
+ frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
+
+ return ((parent_rate * 1000) / ((div * 1000) + frac));
+}
+
+static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value, pre;
+ unsigned long flags = 0;
+ unsigned long rate_div, f, clockout0_div;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ rate_div = ((parent_rate * 1000) / rate);
+ clockout0_div = rate_div / 1000;
+
+ pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
+ f = (u32)(pre - (clockout0_div * 1000));
+ f = f & WZRD_CLKOUT_FRAC_MASK;
+
+ value = ((f << WZRD_CLKOUT_DIVIDE_WIDTH) | (clockout0_div &
+ WZRD_CLKOUT_DIVIDE_MASK));
+
+ /* Set divisor and clear phase offset */
+ writel(value, div_addr);
+ writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (!retries) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (!retries)
+ err = -ETIMEDOUT;
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return rate;
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
+ .round_rate = clk_wzrd_round_rate_f,
+ .set_rate = clk_wzrd_dynamic_reconfig_f,
+ .recalc_rate = clk_wzrd_recalc_ratef,
+};
+
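+/*
+ * Register the fractional-capable divider used for clkout0. This mirrors
+ * clk_register_divider() but binds the wizard-specific fractional ops.
+ */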
+static struct clk *clk_wzrd_register_divf(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops_f;
+
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
+static struct clk *clk_wzrd_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
void *data)
{
@@ -131,7 +488,7 @@ static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
static int clk_wzrd_probe(struct platform_device *pdev)
{
int i, ret;
- u32 reg;
+ u32 reg, reg_f, mult;
unsigned long rate;
const char *clk_name;
struct clk_wzrd *clk_wzrd;
@@ -183,17 +540,13 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- /* we don't support fractional div/mul yet */
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLKFBOUT_FRAC_EN;
- reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
- WZRD_CLKOUT0_FRAC_EN;
- if (reg)
- dev_warn(&pdev->dev, "fractional div/mul not supported\n");
-
/* register multiplier */
reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
+ reg_f = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLKFBOUT_FRAC_MASK) >> WZRD_CLKFBOUT_FRAC_SHIFT;
+
+ mult = ((reg * 1000) + reg_f);
clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
if (!clk_name) {
ret = -ENOMEM;
@@ -202,7 +555,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
(&pdev->dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
- 0, reg, 1);
+ 0, mult, 1000);
kfree(clk_name);
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
@@ -240,11 +593,24 @@ static int clk_wzrd_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_rm_int_clks;
}
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
- reg &= WZRD_CLKOUT_DIVIDE_MASK;
- reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
- clk_wzrd->clkout[i] = clk_register_fixed_factor
- (&pdev->dev, clkout_name, clk_name, 0, 1, reg);
+ if (!i)
+ clk_wzrd->clkout[i] = clk_wzrd_register_divf
+ (&pdev->dev, clkout_name,
+ clk_name, 0,
+ clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ NULL, &clkwzrd_lock);
+ else
+ clk_wzrd->clkout[i] = clk_wzrd_register_divider
+ (&pdev->dev, clkout_name,
+ clk_name, 0,
+ clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ NULL, &clkwzrd_lock);
if (IS_ERR(clk_wzrd->clkout[i])) {
int j;
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
index 723271e93316..0439af67930b 100644
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ b/drivers/staging/clocking-wizard/dt-binding.txt
@@ -9,6 +9,7 @@ http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-
Required properties:
- compatible: Must be 'xlnx,clocking-wizard'
+ - #clock-cells: Number of cells in a clock specifier. Should be 1
- reg: Base and size of the cores register space
- clocks: Handle to input clock
- clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
@@ -19,12 +20,13 @@ Optional properties:
Example:
clock-generator@40040000 {
+ #clock-cells = <1>;
reg = <0x40040000 0x1000>;
compatible = "xlnx,clocking-wizard";
speed-grade = <1>;
clock-names = "clk_in1", "s_axi_aclk";
clocks = <&clkc 15>, <&clkc 15>;
- clock-output-names = "clk_out0", "clk_out1", "clk_out2",
+ clock-output-names = "clk_out1", "clk_out2",
"clk_out3", "clk_out4", "clk_out5",
"clk_out6", "clk_out7";
};
diff --git a/drivers/staging/fclk/Kconfig b/drivers/staging/fclk/Kconfig
new file mode 100644
index 000000000000..5f68261a206d
--- /dev/null
+++ b/drivers/staging/fclk/Kconfig
@@ -0,0 +1,9 @@
+#
+# Xilinx PL clk enabler
+#
+
+config XILINX_FCLK
+ tristate "Xilinx PL clock enabler"
+ depends on COMMON_CLK && OF
+ ---help---
+ Support for the Xilinx fclk clock enabler.
diff --git a/drivers/staging/fclk/Makefile b/drivers/staging/fclk/Makefile
new file mode 100644
index 000000000000..71723036c94e
--- /dev/null
+++ b/drivers/staging/fclk/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XILINX_FCLK) += xilinx_fclk.o
diff --git a/drivers/staging/fclk/TODO b/drivers/staging/fclk/TODO
new file mode 100644
index 000000000000..912325fe5f4d
--- /dev/null
+++ b/drivers/staging/fclk/TODO
@@ -0,0 +1,2 @@
+TODO:
+ - Remove this hack and clock adapt all the drivers.
diff --git a/drivers/staging/fclk/dt-binding.txt b/drivers/staging/fclk/dt-binding.txt
new file mode 100644
index 000000000000..23521608b4a8
--- /dev/null
+++ b/drivers/staging/fclk/dt-binding.txt
@@ -0,0 +1,16 @@
+Binding for Xilinx pl clocks
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+ - compatible: Must be 'xlnx,fclk'
+ - clocks: Handle to input clock
+
+Example:
+ fclk3: fclk3 {
+ status = "disabled";
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+ };
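+
+The driver keeps the referenced clock prepared and enabled, and exposes
+a 'set_rate' sysfs attribute on the platform device through which the
+clock rate can be read or changed.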
diff --git a/drivers/staging/fclk/xilinx_fclk.c b/drivers/staging/fclk/xilinx_fclk.c
new file mode 100644
index 000000000000..189928b8dd79
--- /dev/null
+++ b/drivers/staging/fclk/xilinx_fclk.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct fclk_state {
+ struct device *dev;
+ struct clk *pl;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id fclk_of_match[] = {
+ { .compatible = "xlnx,fclk",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, fclk_of_match);
+
+static ssize_t set_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(st->pl));
+}
+
+static ssize_t set_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long rate;
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ ret = kstrtoul(buf, 0, &rate);
+ if (ret)
+ return -EINVAL;
+
+ rate = clk_round_rate(st->pl, rate);
+ ret = clk_set_rate(st->pl, rate);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(set_rate);
+
+static const struct attribute *fclk_ctrl_attrs[] = {
+ &dev_attr_set_rate.attr,
+ NULL,
+};
+
+static const struct attribute_group fclk_ctrl_attr_grp = {
+ .attrs = (struct attribute **)fclk_ctrl_attrs,
+};
+
+static int fclk_probe(struct platform_device *pdev)
+{
+ struct fclk_state *st;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->dev = dev;
+ platform_set_drvdata(pdev, st);
+
+ st->pl = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->pl))
+ return PTR_ERR(st->pl);
+
+ ret = clk_prepare_enable(st->pl);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ ret = sysfs_create_group(&dev->kobj, &fclk_ctrl_attr_grp);
+ if (ret) {
+ clk_disable_unprepare(st->pl);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fclk_remove(struct platform_device *pdev)
+{
+ struct fclk_state *st = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &fclk_ctrl_attr_grp);
+ clk_disable_unprepare(st->pl);
+ return 0;
+}
+
+static struct platform_driver fclk_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fclk_of_match,
+ },
+ .probe = fclk_probe,
+ .remove = fclk_remove,
+};
+
+module_platform_driver(fclk_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("fclk enable");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ctrl_driver/Kconfig b/drivers/staging/xlnx_ctrl_driver/Kconfig
new file mode 100644
index 000000000000..3bff5e6d1aca
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/Kconfig
@@ -0,0 +1,15 @@
+config XLNX_CTRL_FRMBUF
+ tristate "FB Control driver"
+ help
+ This driver supports the Xilinx Framebuffer read and write IPs. It
+ is a simple control-plane driver operated through ioctls from
+ userspace. It does not use a media framework such as V4L2 or DRM
+ and therefore does not need to adhere to their interfaces.
+
+config XLNX_CTRL_VPSS
+ tristate "VPSS Control driver"
+ help
+ This driver supports the Xilinx VPSS IP. It is a simple
+ control-plane driver operated through ioctls from userspace. It
+ does not use a media framework such as V4L2 or DRM and therefore
+ does not need to adhere to their interfaces.
diff --git a/drivers/staging/xlnx_ctrl_driver/MAINTAINERS b/drivers/staging/xlnx_ctrl_driver/MAINTAINERS
new file mode 100644
index 000000000000..bcfd70d359ec
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX CONTROL DRIVER
+M: Saurabh Sengar <saurabh.singh@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ctrl_driver
diff --git a/drivers/staging/xlnx_ctrl_driver/Makefile b/drivers/staging/xlnx_ctrl_driver/Makefile
new file mode 100644
index 000000000000..312bd1f5d233
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_XLNX_CTRL_FRMBUF) += xlnx_frmb.o
+obj-$(CONFIG_XLNX_CTRL_VPSS) += xlnx_vpss.o
diff --git a/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c b/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c
new file mode 100644
index 000000000000..0b36575e493b
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx Framebuffer read control driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabh.singh@xilinx.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xlnx_ctrl.h>
+
+/* TODO: clock framework */
+
+#define XFBWR_FB_CTRL 0x00
+#define XFBWR_FB_WIDTH 0x10
+#define XFBWR_FB_HEIGHT 0x18
+#define XFBWR_FB_STRIDE 0x20
+#define XFBWR_FB_COLOR 0x28
+#define XFBWR_FB_PLANE1 0x30
+#define XFBWR_FB_PLANE2 0x3C
+
+#define XFBWR_FB_CTRL_START BIT(0)
+#define XFBWR_FB_CTRL_IDLE BIT(2)
+#define XFBWR_FB_CTRL_RESTART BIT(7)
+#define XFBWR_FB_CTRL_OFF 0
+
+static u64 dma_mask = -1ULL;
+
+struct frmb_dmabuf_reg {
+ s32 dmabuf_fd;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *dbuf_sg_table;
+};
+
+/**
+ * struct frmb_struct - Xilinx framebuffer ctrl object
+ *
+ * @dev: device structure
+ * @db: framebuffer ctrl driver dmabuf structure
+ * @frmb_miscdev: The misc device registered
+ * @regs: Base address of framebuffer IP
+ * @is_fbrd: True for framebuffer Read else false
+ */
+struct frmb_struct {
+ struct device *dev;
+ struct frmb_dmabuf_reg db;
+ struct miscdevice frmb_miscdev;
+ void __iomem *regs;
+ bool is_fbrd;
+};
+
+struct frmb_data {
+ u32 fd;
+ u32 height;
+ u32 width;
+ u32 stride;
+ u32 color;
+ u32 n_planes;
+ u32 offset;
+};
+
+struct match_struct {
+ char name[8];
+ bool is_read;
+};
+
+static const struct match_struct read_struct = {
+ .name = "fbrd",
+ .is_read = true,
+};
+
+static const struct match_struct write_struct = {
+ .name = "fbwr",
+ .is_read = false,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id frmb_of_match[] = {
+ { .compatible = "xlnx,ctrl-fbwr-1.0", .data = &write_struct},
+ { .compatible = "xlnx,ctrl-fbrd-1.0", .data = &read_struct},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, frmb_of_match);
+
+static inline struct frmb_struct *to_frmb_struct(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct frmb_struct, frmb_miscdev);
+}
+
+static inline u32 frmb_ior(void __iomem *lp, off_t offset)
+{
+ return readl(lp + offset);
+}
+
+static inline void frmb_iow(void __iomem *lp, off_t offset, u32 value)
+{
+ writel(value, (lp + offset));
+}
+
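+/* Resolve a DMA-BUF fd to a bus address the framebuffer IP can use. */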
+static phys_addr_t frmb_add_dmabuf(u32 fd, struct frmb_struct *frmb_g)
+{
+ frmb_g->db.dbuf = dma_buf_get(fd);
+ if (IS_ERR(frmb_g->db.dbuf)) {
+ dev_err(frmb_g->dev, "Failed DMA-BUF get\n");
+ return -EINVAL;
+ }
+
+ frmb_g->db.dbuf_attach = dma_buf_attach(frmb_g->db.dbuf, frmb_g->dev);
+ if (IS_ERR(frmb_g->db.dbuf_attach)) {
+ dma_buf_put(frmb_g->db.dbuf);
+ dev_err(frmb_g->dev, "Failed DMA-BUF attach\n");
+ return -EINVAL;
+ }
+
+ frmb_g->db.dbuf_sg_table = dma_buf_map_attachment(frmb_g->db.dbuf_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(frmb_g->db.dbuf_sg_table)) {
+ dev_err(frmb_g->dev, "Failed DMA-BUF map_attachment\n");
+ dma_buf_detach(frmb_g->db.dbuf, frmb_g->db.dbuf_attach);
+ dma_buf_put(frmb_g->db.dbuf);
+ return -EINVAL;
+ }
+
+ return (u32)sg_dma_address(frmb_g->db.dbuf_sg_table->sgl);
+}
+
+static void xlnk_clear_dmabuf(struct frmb_struct *frmb_g)
+{
+ dma_buf_unmap_attachment(frmb_g->db.dbuf_attach,
+ frmb_g->db.dbuf_sg_table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(frmb_g->db.dbuf, frmb_g->db.dbuf_attach);
+ dma_buf_put(frmb_g->db.dbuf);
+}
+
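+/*
+ * Control-plane ioctls: poll for idle, start/stop the IP, program frame
+ * geometry and attach or release DMA-BUF backed frame buffers.
+ */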
+static long frmb_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct frmb_data data;
+ phys_addr_t phys_y = 0, phys_uv = 0;
+ struct frmb_struct *frmb_g = to_frmb_struct(file);
+
+ switch (cmd) {
+ case XSET_FB_POLL:
+ retval = frmb_ior(frmb_g->regs, XFBWR_FB_CTRL);
+ /* report 0 when the IP is idle, 1 while it is busy */
+ retval = (retval & XFBWR_FB_CTRL_IDLE) ? 0 : 1;
+ break;
+ case XSET_FB_ENABLE_SNGL:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_START);
+ break;
+ case XSET_FB_ENABLE:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_START);
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL,
+ XFBWR_FB_CTRL_RESTART | XFBWR_FB_CTRL_START);
+ break;
+ case XSET_FB_DISABLE:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_OFF);
+ break;
+ case XSET_FB_CONFIGURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ break;
+ }
+ frmb_iow(frmb_g->regs, XFBWR_FB_WIDTH, data.width);
+ frmb_iow(frmb_g->regs, XFBWR_FB_HEIGHT, data.height);
+ frmb_iow(frmb_g->regs, XFBWR_FB_STRIDE, data.stride);
+ frmb_iow(frmb_g->regs, XFBWR_FB_COLOR, data.color);
+ break;
+ case XSET_FB_CAPTURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ break;
+ }
+ phys_y = frmb_add_dmabuf(data.fd, frmb_g);
+ frmb_iow(frmb_g->regs, XFBWR_FB_PLANE1, phys_y);
+ if (data.n_planes == 2) {
+ phys_uv = phys_y + data.offset;
+ frmb_iow(frmb_g->regs, XFBWR_FB_PLANE2, phys_uv);
+ }
+ break;
+ case XSET_FB_RELEASE:
+ xlnk_clear_dmabuf(frmb_g);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static const struct file_operations frmb_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = frmb_ioctl,
+};
+
+static int frmb_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+ struct resource *res_frmb;
+ const struct of_device_id *match;
+ struct frmb_struct *frmb_g;
+ struct gpio_desc *reset_gpio;
+ const struct match_struct *config;
+
+ pdev->dev.dma_mask = &dma_mask;
+ pdev->dev.coherent_dma_mask = dma_mask;
+
+ frmb_g = devm_kzalloc(&pdev->dev, sizeof(*frmb_g), GFP_KERNEL);
+ if (!frmb_g)
+ return -ENOMEM;
+
+ reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ ret = PTR_ERR(reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "No gpio probed, Deferring...\n");
+ else
+ dev_err(&pdev->dev, "No reset gpio info from dts\n");
+ return ret;
+ }
+ gpiod_set_value_cansleep(reset_gpio, 0);
+
+ platform_set_drvdata(pdev, frmb_g);
+ frmb_g->dev = &pdev->dev;
+ res_frmb = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ frmb_g->regs = devm_ioremap_resource(&pdev->dev, res_frmb);
+ if (IS_ERR(frmb_g->regs))
+ return PTR_ERR(frmb_g->regs);
+
+ match = of_match_node(frmb_of_match, node);
+ if (!match)
+ return -ENODEV;
+
+ config = match->data;
+ frmb_g->frmb_miscdev.name = config->name;
+ frmb_g->is_fbrd = config->is_read;
+
+ frmb_g->frmb_miscdev.minor = MISC_DYNAMIC_MINOR;
+ frmb_g->frmb_miscdev.fops = &frmb_fops;
+ frmb_g->frmb_miscdev.parent = NULL;
+ ret = misc_register(&frmb_g->frmb_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "FrameBuffer control driver registration failed!\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "FrameBuffer control driver success!\n");
+
+ return ret;
+}
+
+static int frmb_remove(struct platform_device *pdev)
+{
+ struct frmb_struct *frmb_g = platform_get_drvdata(pdev);
+
+ misc_deregister(&frmb_g->frmb_miscdev);
+ return 0;
+}
+
+static struct platform_driver frmb_driver = {
+ .probe = frmb_probe,
+ .remove = frmb_remove,
+ .driver = {
+ .name = "xlnx_ctrl-frmb",
+ .of_match_table = frmb_of_match,
+ },
+};
+
+module_platform_driver(frmb_driver);
+
+MODULE_DESCRIPTION("Xilinx Framebuffer control driver");
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c b/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c
new file mode 100644
index 000000000000..017ad0a4cffd
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA VPSS control driver.
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabh.singh@xilinx.com>
+ */
+
+/* TODO: clock framework */
+
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xlnx_ctrl.h>
+
+/* VPSS block offset */
+#define XHSCALER_OFFSET 0
+#define XSAXIS_RST_OFFSET 0x10000
+#define XVSCALER_OFFSET 0x20000
+
+#define XVPSS_GPIO_CHAN 8
+
+#define XVPSS_MAX_WIDTH 3840
+#define XVPSS_MAX_HEIGHT 2160
+
+#define XVPSS_STEPPREC 65536
+
+/* Video IP PPC */
+#define XVPSS_PPC_1 1
+#define XVPSS_PPC_2 2
+
+#define XVPSS_MAX_TAPS 12
+#define XVPSS_PHASES 64
+#define XVPSS_TAPS_6 6
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVPSS_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVPSS_MASK_LOW_32BITS GENMASK(31, 0)
+#define XVPSS_STEP_PRECISION_SHIFT (16)
+#define XVPSS_PHASE_SHIFT_BY_6 (6)
+#define XVPSS_PHASE_MULTIPLIER (9)
+#define XVPSS_BITSHIFT_16 (16)
+
+/* VPSS AP Control Registers */
+#define XVPSS_START BIT(0)
+#define XVPSS_RESTART BIT(7)
+#define XVPSS_STREAM_ON (XVPSS_START | XVPSS_RESTART)
+
+/* H-scaler registers */
+#define XVPSS_H_AP_CTRL (0x0000)
+#define XVPSS_H_GIE (0x0004)
+#define XVPSS_H_IER (0x0008)
+#define XVPSS_H_ISR (0x000c)
+#define XVPSS_H_HEIGHT (0x0010)
+#define XVPSS_H_WIDTHIN (0x0018)
+#define XVPSS_H_WIDTHOUT (0x0020)
+#define XVPSS_H_COLOR (0x0028)
+#define XVPSS_H_PIXELRATE (0x0030)
+#define XVPSS_H_COLOROUT (0x0038)
+#define XVPSS_H_HFLTCOEFF_BASE (0x0800)
+#define XVPSS_H_HFLTCOEFF_HIGH (0x0bff)
+#define XVPSS_H_PHASESH_V_BASE (0x2000)
+#define XVPSS_H_PHASESH_V_HIGH (0x3fff)
+
+/* H-scaler masks */
+#define XVPSS_PHASESH_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XVPSS_V_AP_CTRL (0x000)
+#define XVPSS_V_GIE (0x004)
+#define XVPSS_V_IER (0x008)
+#define XVPSS_V_ISR (0x00c)
+#define XVPSS_V_HEIGHTIN (0x010)
+#define XVPSS_V_WIDTH (0x018)
+#define XVPSS_V_HEIGHTOUT (0x020)
+#define XVPSS_V_LINERATE (0x028)
+#define XVPSS_V_COLOR (0x030)
+#define XVPSS_V_VFLTCOEFF_BASE (0x800)
+#define XVPSS_V_VFLTCOEFF_HIGH (0xbff)
+
+#define XVPSS_GPIO_RST_SEL 1
+#define XVPSS_GPIO_VIDEO_IN BIT(0)
+#define XVPSS_RST_IP_AXIS BIT(1)
+#define XVPSS_GPIO_MASK_ALL (XVPSS_GPIO_VIDEO_IN | XVPSS_RST_IP_AXIS)
+
+enum xvpss_color {
+ XVPSS_YUV_RGB,
+ XVPSS_YUV_444,
+ XVPSS_YUV_422,
+ XVPSS_YUV_420,
+};
+
+/* VPSS coefficients for 6 tap filters */
+static const u16
+xvpss_coeff_taps6[XVPSS_PHASES][XVPSS_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+/**
+ * struct xvpss_struct - Xilinx VPSS ctrl object
+ *
+ * @dev: device structure
+ * @xvpss_miscdev: The misc device registered
+ * @regs: Base address of VPSS
+ * @n_taps: number of horizontal/vertical taps
+ * @ppc: Pixels per Clock cycle the IP operates upon
+ * @is_polyphase: True if the scaler is polyphase, false otherwise
+ * @vpss_coeff: The complete array of H-scaler/V-scaler coefficients
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @reset_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ */
+struct xvpss_struct {
+ struct device *dev;
+ struct miscdevice xvpss_miscdev;
+ void __iomem *regs;
+ int n_taps;
+ int ppc;
+ bool is_polyphase;
+ short vpss_coeff[XVPSS_PHASES][XVPSS_MAX_TAPS];
+ u32 H_phases[XVPSS_MAX_WIDTH];
+ struct gpio_desc *reset_gpio;
+};
+
+struct xvpss_data {
+ u32 height_in;
+ u32 width_in;
+ u32 height_out;
+ u32 width_out;
+ u32 color_in;
+ u32 color_out;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id xvpss_of_match[] = {
+ { .compatible = "xlnx,ctrl-xvpss-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xvpss_of_match);
+
+static inline struct xvpss_struct *to_xvpss_struct(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct xvpss_struct, xvpss_miscdev);
+}
+
+static inline u32 xvpss_ior(void __iomem *lp, off_t offset)
+{
+ return readl(lp + offset);
+}
+
+static inline void xvpss_iow(void __iomem *lp, off_t offset, u32 value)
+{
+ writel(value, (lp + offset));
+}
+
+static inline void xvpss_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ xvpss_iow(base, offset, xvpss_ior(base, offset) & ~clr);
+}
+
+static inline void xvpss_set(void __iomem *base, u32 offset, u32 set)
+{
+ xvpss_iow(base, offset, xvpss_ior(base, offset) | set);
+}
+
+static inline void xvpss_disable_block(struct xvpss_struct *xvpss_g,
+ u32 channel, u32 ip_block)
+{
+ xvpss_clr(xvpss_g->regs, ((channel - 1) * XVPSS_GPIO_CHAN) +
+ XSAXIS_RST_OFFSET, ip_block);
+}
+
+static inline void
+xvpss_enable_block(struct xvpss_struct *xvpss_g, u32 channel, u32 ip_block)
+{
+ xvpss_set(xvpss_g->regs, ((channel - 1) * XVPSS_GPIO_CHAN) +
+ XSAXIS_RST_OFFSET, ip_block);
+}
+
+static void xvpss_reset(struct xvpss_struct *xvpss_g)
+{
+ xvpss_disable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_GPIO_MASK_ALL);
+ xvpss_enable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_RST_IP_AXIS);
+}
+
+static void xvpss_enable(struct xvpss_struct *xvpss_g)
+{
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET +
+ XVPSS_H_AP_CTRL, XVPSS_STREAM_ON);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET +
+ XVPSS_V_AP_CTRL, XVPSS_STREAM_ON);
+ xvpss_enable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_RST_IP_AXIS);
+}
+
+static void xvpss_disable(struct xvpss_struct *xvpss_g)
+{
+ xvpss_disable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_GPIO_MASK_ALL);
+}
+
+static void xvpss_set_input(struct xvpss_struct *xvpss_g,
+ u32 width, u32 height, u32 color)
+{
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_HEIGHTIN, height);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_WIDTH, width);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_WIDTHIN, width);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_COLOR, color);
+}
+
+static void xvpss_set_output(struct xvpss_struct *xvpss_g, u32 width,
+ u32 height, u32 color)
+{
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_HEIGHTOUT, height);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_HEIGHT, height);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_WIDTHOUT, width);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_COLOROUT, color);
+}
+
+static void xvpss_load_ext_coeff(struct xvpss_struct *xvpss_g,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XVPSS_MAX_TAPS - ntaps;
+ offset = pad >> 1;
+ /* Load coefficients into vpss coefficient table */
+ for (i = 0; i < XVPSS_PHASES; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xvpss_g->vpss_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+ if (pad) {
+ for (i = 0; i < XVPSS_PHASES; i++) {
+ for (j = 0; j < offset; j++)
+ xvpss_g->vpss_coeff[i][j] = 0;
+ j = ntaps + offset;
+ for (; j < XVPSS_MAX_TAPS; j++)
+ xvpss_g->vpss_coeff[i][j] = 0;
+ }
+ }
+}
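+
+/*
+ * For example, loading the 6-tap set above (xvpss_coeff_taps6): pad =
+ * XVPSS_MAX_TAPS - 6 = 6 and offset = 3, so each phase's six
+ * coefficients occupy columns 3..8 of the 12-column table while
+ * columns 0..2 and 9..11 are zeroed.
+ */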
+
+static void xvpss_select_coeff(struct xvpss_struct *xvpss_g)
+{
+ const short *coeff;
+ u32 ntaps;
+
+ coeff = &xvpss_coeff_taps6[0][0];
+ ntaps = XVPSS_TAPS_6;
+
+ xvpss_load_ext_coeff(xvpss_g, coeff, ntaps);
+}
+
+static void xvpss_set_coeff(struct xvpss_struct *xvpss_g)
+{
+ u32 nphases = XVPSS_PHASES;
+ u32 ntaps = xvpss_g->n_taps;
+ int val, i, j, offset, rd_indx;
+ u32 v_addr, h_addr;
+
+ offset = (XVPSS_MAX_TAPS - ntaps) / 2;
+ v_addr = XVSCALER_OFFSET + XVPSS_V_VFLTCOEFF_BASE;
+ h_addr = XHSCALER_OFFSET + XVPSS_H_HFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xvpss_g->vpss_coeff[i][rd_indx + 1] <<
+ XVPSS_BITSHIFT_16) | (xvpss_g->vpss_coeff[i][rd_indx] &
+ XVPSS_MASK_LOW_16BITS);
+ xvpss_iow(xvpss_g->regs, v_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ xvpss_iow(xvpss_g->regs, h_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static void xvpss_h_calculate_phases(struct xvpss_struct *xvpss_g,
+ u32 width_in, u32 width_out,
+ u32 pixel_rate)
+{
+ unsigned int loop_width, x, s, nphases = XVPSS_PHASES;
+ unsigned int nppc = xvpss_g->ppc;
+ unsigned int shift = XVPSS_STEP_PRECISION_SHIFT - ilog2(nphases);
+ int offset = 0, xwrite_pos = 0, nr_rds = 0, nr_rds_clck;
+ bool output_write_en, get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+ memset(xvpss_g->H_phases, 0, sizeof(xvpss_g->H_phases));
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XVPSS_STEP_PRECISION_SHIFT) != 0) {
+ get_new_pix = true;
+ offset -= (1 << XVPSS_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XVPSS_STEP_PRECISION_SHIFT) == 0) &&
+ xwrite_pos < width_out) {
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ xvpss_g->H_phases[x] |= (phaseH <<
+ (s * XVPSS_PHASE_MULTIPLIER));
+ xvpss_g->H_phases[x] |= (array_idx <<
+ (XVPSS_PHASE_SHIFT_BY_6 +
+ (s * XVPSS_PHASE_MULTIPLIER)));
+ if (output_write_en) {
+ xvpss_g->H_phases[x] |= (XVPSS_PHASESH_WR_EN <<
+ (s * XVPSS_PHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+static void xvpss_h_set_phases(struct xvpss_struct *xvpss_g)
+{
+ u32 loop_width, index, val, offset, i, lsb, msb;
+
+ loop_width = XVPSS_MAX_WIDTH / xvpss_g->ppc;
+ offset = XHSCALER_OFFSET + XVPSS_H_PHASESH_V_BASE;
+
+ switch (xvpss_g->ppc) {
+ case XVPSS_PPC_1:
+ index = 0;
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = xvpss_g->H_phases[i] & XVPSS_MASK_LOW_16BITS;
+ msb = xvpss_g->H_phases[i + 1] & XVPSS_MASK_LOW_16BITS;
+ val = (msb << 16 | lsb);
+ xvpss_iow(xvpss_g->regs, offset +
+ (index * 4), val);
+ ++index;
+ }
+ return;
+ case XVPSS_PPC_2:
+ for (i = 0; i < loop_width; i++) {
+ val = (xvpss_g->H_phases[i] & XVPSS_MASK_LOW_32BITS);
+ xvpss_iow(xvpss_g->regs, offset + (i * 4), val);
+ }
+ return;
+ }
+}
+
+static void xvpss_algo_config(struct xvpss_struct *xvpss_g,
+ struct xvpss_data data)
+{
+ u32 pxl_rate, line_rate;
+ u32 width_in = data.width_in;
+ u32 width_out = data.width_out;
+ u32 height_in = data.height_in;
+ u32 height_out = data.height_out;
+
+ line_rate = (height_in * XVPSS_STEPPREC) / height_out;
+
+ if (xvpss_g->is_polyphase) {
+ xvpss_select_coeff(xvpss_g);
+ xvpss_set_coeff(xvpss_g);
+ }
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_LINERATE, line_rate);
+ pxl_rate = (width_in * XVPSS_STEPPREC) / width_out;
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_PIXELRATE, pxl_rate);
+
+ xvpss_h_calculate_phases(xvpss_g, width_in, width_out, pxl_rate);
+ xvpss_h_set_phases(xvpss_g);
+}
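+
+/*
+ * A worked example of the 16.16 fixed-point math above (values
+ * illustrative): scaling 3840 -> 1920 gives pixel_rate =
+ * 3840 * 65536 / 1920 = 131072, i.e. 2.0 in 16.16 format. With 64
+ * phases, shift = 16 - ilog2(64) = 10 in xvpss_h_calculate_phases(),
+ * so an accumulated offset of 0x18000 (1.5) maps to phase
+ * (0x18000 >> 10) & 63 = 32, and its integer part (offset >> 16) = 1
+ * triggers one new input-pixel read.
+ */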
+
+static long xvpss_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct xvpss_data data;
+ struct xvpss_struct *xvpss_g = to_xvpss_struct(file);
+ u32 hcol;
+
+ switch (cmd) {
+ case XVPSS_SET_CONFIGURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EFAULT;
+ goto end;
+ }
+ xvpss_reset(xvpss_g);
+ xvpss_set_input(xvpss_g, data.width_in, data.height_in,
+ data.color_in);
+ hcol = data.color_in;
+ if (hcol == XVPSS_YUV_420)
+ hcol = XVPSS_YUV_422;
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_COLOR, hcol);
+ xvpss_set_output(xvpss_g, data.width_out, data.height_out,
+ data.color_out);
+ xvpss_algo_config(xvpss_g, data);
+ break;
+ case XVPSS_SET_ENABLE:
+ xvpss_enable(xvpss_g);
+ break;
+ case XVPSS_SET_DISABLE:
+ xvpss_disable(xvpss_g);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+end:
+ return retval;
+}
+
+static const struct file_operations xvpss_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = xvpss_ioctl,
+};
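+
+/*
+ * A minimal user-space sketch of the intended ioctl sequence, assuming
+ * the XVPSS_SET_* requests (from the xlnx_ctrl header included above)
+ * are visible to user space; the node name follows the "xvpss" misc
+ * device registered in xvpss_probe():
+ *
+ * struct xvpss_data d = {
+ * .width_in = 3840, .height_in = 2160,
+ * .color_in = XVPSS_YUV_422,
+ * .width_out = 1920, .height_out = 1080,
+ * .color_out = XVPSS_YUV_422,
+ * };
+ * int fd = open("/dev/xvpss", O_RDWR);
+ *
+ * ioctl(fd, XVPSS_SET_CONFIGURE, &d); reset and program the scaler
+ * ioctl(fd, XVPSS_SET_ENABLE); stream on
+ * ...
+ * ioctl(fd, XVPSS_SET_DISABLE);
+ */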
+
+static int xvpss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+ struct xvpss_struct *xvpss_g;
+ struct device_node *node;
+
+ xvpss_g = devm_kzalloc(&pdev->dev, sizeof(*xvpss_g), GFP_KERNEL);
+ if (!xvpss_g)
+ return -ENOMEM;
+
+ xvpss_g->reset_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvpss_g->reset_gpio)) {
+ ret = PTR_ERR(xvpss_g->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "Reset GPIO not ready, deferring probe\n");
+ else
+ dev_err(&pdev->dev, "Failed to get reset GPIO from DT\n");
+ return ret;
+ }
+ gpiod_set_value_cansleep(xvpss_g->reset_gpio, 0);
+
+ platform_set_drvdata(pdev, xvpss_g);
+ xvpss_g->dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvpss_g->regs = devm_ioremap_resource(xvpss_g->dev, res);
+ if (IS_ERR(xvpss_g->regs))
+ return PTR_ERR(xvpss_g->regs);
+
+ node = pdev->dev.of_node;
+ ret = of_property_read_u32(node, "xlnx,vpss-taps", &xvpss_g->n_taps);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "taps not present in DT\n");
+ return ret;
+ }
+
+ switch (xvpss_g->n_taps) {
+ case 2:
+ case 4:
+ break;
+ case 6:
+ xvpss_g->is_polyphase = true;
+ break;
+ default:
+ dev_err(xvpss_g->dev, "taps value not supported\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,vpss-ppc", &xvpss_g->ppc);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "PPC is missing in DT\n");
+ return ret;
+ }
+ if (xvpss_g->ppc != XVPSS_PPC_1 && xvpss_g->ppc != XVPSS_PPC_2) {
+ dev_err(xvpss_g->dev, "Unsupported ppc: %d", xvpss_g->ppc);
+ return -EINVAL;
+ }
+
+ xvpss_g->xvpss_miscdev.minor = MISC_DYNAMIC_MINOR;
+ xvpss_g->xvpss_miscdev.name = "xvpss";
+ xvpss_g->xvpss_miscdev.fops = &xvpss_fops;
+ ret = misc_register(&xvpss_g->xvpss_miscdev);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "Xilinx VPSS registration failed!\n");
+ return ret;
+ }
+
+ dev_info(xvpss_g->dev, "Xlnx VPSS control driver initialized!\n");
+
+ return ret;
+}
+
+static int xvpss_remove(struct platform_device *pdev)
+{
+ struct xvpss_struct *xvpss_g = platform_get_drvdata(pdev);
+
+ misc_deregister(&xvpss_g->xvpss_miscdev);
+ return 0;
+}
+
+static struct platform_driver xvpss_driver = {
+ .probe = xvpss_probe,
+ .remove = xvpss_remove,
+ .driver = {
+ .name = "xlnx_vpss",
+ .of_match_table = xvpss_of_match,
+ },
+};
+
+module_platform_driver(xvpss_driver);
+
+MODULE_DESCRIPTION("Xilinx VPSS control driver");
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ernic/Kconfig b/drivers/staging/xlnx_ernic/Kconfig
new file mode 100644
index 000000000000..2d83fea0f3b9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Kconfig
@@ -0,0 +1,4 @@
+config ERNIC
+ tristate "Xilinx ERNIC driver"
+ help
+ Driver for the Xilinx Embedded Remote DMA (RDMA) enabled NIC.
diff --git a/drivers/staging/xlnx_ernic/MAINTAINERS b/drivers/staging/xlnx_ernic/MAINTAINERS
new file mode 100644
index 000000000000..0355f5d3320f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX EMBEDDED REMOTE DMA ENABLED NIC
+M: Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ernic
diff --git a/drivers/staging/xlnx_ernic/Makefile b/drivers/staging/xlnx_ernic/Makefile
new file mode 100644
index 000000000000..564933fa42d7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Makefile
@@ -0,0 +1,7 @@
+#TODO: Need to remove these flags and fix compilation warnings.
+ccflags-y := -Wno-incompatible-pointer-types -Wno-packed-bitfield-compat
+
+obj-m += xernic.o
+obj-m += xernic_bw_test.o
+
+xernic-objs := xmain.o xcm.o xqp.o xmr.o
diff --git a/drivers/staging/xlnx_ernic/dt-binding.txt b/drivers/staging/xlnx_ernic/dt-binding.txt
new file mode 100644
index 000000000000..2a9d098125b7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/dt-binding.txt
@@ -0,0 +1,29 @@
+Xilinx Embedded RDMA NIC (ERNIC)
+--------------------------------
+
+The Xilinx Embedded Remote DMA (RDMA) NIC is an implementation of
+RDMA over Converged Ethernet (RoCEv2) NIC functionality.
+
+Features supported by ERNIC:
+1. Both IPv4 and IPv6.
+2. 100 Gb/s data path.
+3. Incoming and outgoing RDMA READ, RDMA WRITE and RDMA SEND.
+
+Required properties:
+- compatible : Must contain "xlnx,ernic-1.0".
+- interrupts: Contains the interrupt line numbers.
+- reg: Physical base address and length of the registers set for the device.
+
+ernic_0: ernic@84000000 {
+ compatible = "xlnx,ernic-1.0";
+ interrupts = <4 2
+ 5 2
+ 6 2
+ 7 2
+ 8 2
+ 9 2
+ 10 2
+ 11 2
+ 12 2>;
+ reg = <0x84000000 0x40000>;
+};
diff --git a/drivers/staging/xlnx_ernic/xcm.c b/drivers/staging/xlnx_ernic/xcm.c
new file mode 100644
index 000000000000..64d102e540b4
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.c
@@ -0,0 +1,1962 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+unsigned int psn_num;
+unsigned int mad_tid = 0x11223344;
+/*****************************************************************************/
+
+/**
+ * xrnic_cm_prepare_mra() - Prepares Message Receipt Acknowledgment packet
+ * @qp_attr: qp info for which mra packet is prepared
+ * @msg : message being MRAed. 0x0- REQ, 0x1-REP, 0x2-LAP
+ * @rq_buf: Buffer to store the message
+ */
+static void xrnic_cm_prepare_mra(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_msg_mra msg, void *rq_buf)
+{
+ struct mra *mra;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ mra->local_cm_id = qp_attr->local_cm_id;
+ mra->remote_comm_id = qp_attr->remote_cm_id;
+ pr_info("[%d %s] remote_comm_id 0x%x\n", __LINE__, __func__,
+ mra->remote_comm_id);
+ mra->message_mraed = msg;
+ mra->service_timeout = XRNIC_MRA_SERVICE_TIMEOUT;
+ /* 4.096 µs * 2 service timeout */
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rep() - Prepares Reply packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @rq_buf: Buffer to store the data indicating the acceptance
+ */
+static void xrnic_cm_prepare_rep(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct rdma_qp_attr *rdma_qp_attr = (struct rdma_qp_attr *)
+ &((struct xrnic_reg_map *)xrnic_dev->xrnic_mmap.xrnic_regs)
+ ->rdma_qp_attr[qp_attr->qp_num - 2];
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4 = NULL;
+ struct ipv6hdr *ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct rep *rep;
+ struct req *req;
+ unsigned short temp;
+ unsigned char rq_opcode;
+ unsigned int config_value, start_psn_value;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv4->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv4->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv6->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv6->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ DEBUG_LOG("qp_num:%x\n", qp_attr->qp_num);
+
+ rep->local_cm_id = qp_attr->local_cm_id;
+ rep->remote_comm_id = qp_attr->remote_cm_id;
+
+ rep->local_qpn = ((qp_attr->qp_num >> 16) & 0xFF) |
+ (((qp_attr->qp_num >> 8) & 0xFF) << 8) |
+ ((qp_attr->qp_num & 0xFF) << 16);
+ DEBUG_LOG("local_qpn %d qp_num %d\n",
+ rep->local_qpn, qp_attr->qp_num);
+
+ memcpy((void *)rep->private_data,
+ (void *)&cm_id->conn_param.private_data,
+ cm_id->conn_param.private_data_len);
+
+ DEBUG_LOG("cm_id->conn_param.private_data_len %d\n",
+ cm_id->conn_param.private_data_len);
+ DEBUG_LOG("cm_id->conn_param.responder_resources %d\n",
+ cm_id->conn_param.responder_resources);
+ DEBUG_LOG("cm_id->conn_param.initiator_depth %d\n",
+ cm_id->conn_param.initiator_depth);
+ DEBUG_LOG("cm_id->conn_param.flow_control %d\n",
+ cm_id->conn_param.flow_control);
+ DEBUG_LOG("cm_id->conn_param.retry_count %d\n",
+ cm_id->conn_param.retry_count);
+ DEBUG_LOG("cm_id->conn_param.rnr_retry_count %d\n",
+ cm_id->conn_param.rnr_retry_count);
+
+ /* Initiator depth not required for target. */
+ rep->initiator_depth = cm_id->conn_param.initiator_depth;
+ rep->responder_resources = cm_id->conn_param.responder_resources;
+ rep->end_end_flow_control = cm_id->conn_param.flow_control;
+ rep->rnr_retry_count = cm_id->conn_param.rnr_retry_count;
+ rep->target_ack_delay = XRNIC_REP_TARGET_ACK_DELAY;
+ rep->fail_over_accepted = XRNIC_REP_FAIL_OVER_ACCEPTED;
+
+ DEBUG_LOG("req->initiator_depth %x\n", rep->initiator_depth);
+ DEBUG_LOG("rep->responder_resources %x\n", rep->responder_resources);
+
+ rep->sqr = XRNIC_REQ_SRQ;
+ rep->local_ca_guid[0] = 0x7c;
+ rep->local_ca_guid[1] = 0xfe;
+ rep->local_ca_guid[2] = 0x90;
+ rep->local_ca_guid[3] = 0x03;
+ rep->local_ca_guid[4] = 0x00;
+ rep->local_ca_guid[5] = 0xb8;
+ rep->local_ca_guid[6] = 0x57;
+ rep->local_ca_guid[7] = 0x70;
+
+ qp_attr->remote_qpn = req->local_qpn;
+
+ DEBUG_LOG("local_qpn [0x%x] [%d]\n", req->local_qpn,
+ ntohl(req->local_qpn));
+ config_value = ((req->local_qpn & 0xFF) << 16)
+ | (((req->local_qpn >> 8) & 0xFF) << 8)
+ | ((req->local_qpn >> 16) & 0xFF);
+
+ pr_info("config_value:%d req->local_qpn %d qp_attr->remote_qpn %d\n",
+ config_value, req->local_qpn, qp_attr->remote_qpn);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->dest_qp_conf)));
+
+ /* Set the MAC address */
+ config_value = eth_hdr->h_source[5] | (eth_hdr->h_source[4] << 8) |
+ (eth_hdr->h_source[3] << 16) |
+ (eth_hdr->h_source[2] << 24);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_lsb)));
+ DEBUG_LOG("mac_xrnic_src_addr_lsb->0x%x\n", config_value);
+
+ config_value = eth_hdr->h_source[1] | (eth_hdr->h_source[0] << 8);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_msb)));
+ DEBUG_LOG("mac_xrnic_src_addr_msb->0x%x\n", config_value);
+
+ config_value = 0;
+ DEBUG_LOG("req->start_psn:%x %x %x\n", req->start_psn[0],
+ req->start_psn[1], req->start_psn[2]);
+ config_value = (req->start_psn[2] | (req->start_psn[1] << 8) |
+ (req->start_psn[0] << 16));
+ DEBUG_LOG("req->start psn 0x%x\n", config_value);
+ start_psn_value = config_value;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+ memcpy(rep->start_psn, req->start_psn, 3);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ config_value = ipv4->src_addr;
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ioread32((void *)&rdma_qp_attr->ip_dest_addr1);
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ } else {
+ config_value = ipv6->saddr.in6_u.u6_addr32[3];
+ DEBUG_LOG("ipaddress1:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[2];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr2)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[1];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr3)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[0];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr4)));
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ }
+ rq_opcode = XRNIC_RDMA_READ;
+ config_value = ((start_psn_value - 1) | (rq_opcode << 24));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rej() - Prepares Reject packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @reason: reason for the rejection
+ * @msg: message whose contents caused the sender to reject communication
+ * 0x0-REQ, 0x1-REP, 0x2-No message
+ */
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason, enum xrnic_msg_rej msg)
+{
+ struct rej *rej;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ pr_info("Sending rej\n");
+
+ rej->local_cm_id = qp_attr->local_cm_id;
+ rej->remote_comm_id = qp_attr->remote_cm_id;
+ rej->message_rejected = msg;
+ rej->reason = htons(reason);
+ rej->reject_info_length = XRNIC_REJ_INFO_LEN;
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_prepare_initial_headers() - Prepares response headers from the
+ * received packet
+ * @qp_attr: qp info on which the response is sent
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct mad *mad;
+ unsigned char temp;
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct udphdr *udp;
+ struct bth *bthp;
+ struct deth *dethp;
+ unsigned short *ipv4_hdr_ptr;
+ unsigned int ipv4_hdr_chksum;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ int i;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ /* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv4->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv4->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ /* Copy the ethernet type field */
+ send_sgl_temp_ipv4->eth.eth_type = eth_hdr->eth_type;
+
+ /* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv4->ipv4, ipv4,
+ sizeof(struct ipv4hdr));
+ send_sgl_temp_ipv4->ipv4.dest_addr = ipv4->src_addr;
+ send_sgl_temp_ipv4->ipv4.src_addr = ipv4->dest_addr;
+ ipv4->total_length = (sizeof(struct ipv4hdr) +
+ sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad)) + 4;
+ DEBUG_LOG("ipv4->total_length:%d\n", ipv4->total_length);
+ DEBUG_LOG("ipv4 length:%d\n", sizeof(struct ipv4hdr));
+ DEBUG_LOG("udp length:%d\n", sizeof(struct udphdr));
+ DEBUG_LOG("ethhdr length:%d\n", sizeof(struct ethhdr_t));
+ DEBUG_LOG("bth length:%d\n", sizeof(struct bth));
+ DEBUG_LOG("deth length:%d\n", sizeof(struct deth));
+
+ send_sgl_temp_ipv4->ipv4.total_length =
+ htons(ipv4->total_length);
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = 0;
+ send_sgl_temp_ipv4->ipv4.id = ipv4->id;
+
+ ipv4_hdr_ptr = (unsigned short *)
+ (&send_sgl_temp_ipv4->ipv4);
+ ipv4_hdr_chksum = 0;
+
+ for (i = 0; i < 10; i++) {
+ ipv4_hdr_chksum += *ipv4_hdr_ptr;
+ ipv4_hdr_ptr++;
+ }
+
+ ipv4_hdr_chksum = (ipv4_hdr_chksum & 0x0000FFFF) +
+ (ipv4_hdr_chksum >> 16);
+ /* Fold any carry from the first fold, then invert */
+ ipv4_hdr_chksum = ~((ipv4_hdr_chksum & 0x0000FFFF) +
+ (ipv4_hdr_chksum >> 16));
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = ipv4_hdr_chksum;
+ DEBUG_LOG("check sum :%x\n", ipv4_hdr_chksum);
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv4 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv4hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv4->udp.source = udp->source;
+ send_sgl_temp_ipv4->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv4->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv4->udp.check = htons(udp->check);
+
+ /* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv4->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv4->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv4->bth.migration_req =
+ XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv4->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport hdr ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv4->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv4->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv4->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv4->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv4->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv4->bth.partition_key = 65535;
+
+ /* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv4->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv4->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv4->deth.src_qp = dethp->src_qp;
+
+ /* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ send_sgl_temp_ipv4->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv4->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv4->mad.class_version);
+ send_sgl_temp_ipv4->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv4->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv4->mad.resp_bit_method);
+ send_sgl_temp_ipv4->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv4->mad.transaction_id = mad->transaction_id;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ /* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv6->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv6->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ send_sgl_temp_ipv6->eth.eth_type = eth_hdr->eth_type;
+ memcpy(&send_sgl_temp_ipv6->ipv6, ipv6,
+ sizeof(struct ipv6hdr));
+ /* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv6->ipv6.daddr, &ipv6->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&send_sgl_temp_ipv6->ipv6.saddr, &ipv6->daddr,
+ sizeof(struct in6_addr));
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv6 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv6hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv6->udp.source = udp->source;
+ send_sgl_temp_ipv6->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv6->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv6->udp.check = htons(udp->check);
+
+ /* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv6->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv6->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv6->bth.migration_req = XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv6->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport_hdr_ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv6->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv6->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv6->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv6->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv6->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv6->bth.partition_key = 65535;
+
+ /* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv6->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv6->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv6->deth.src_qp = dethp->src_qp;
+
+ /* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ send_sgl_temp_ipv6->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv6->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv6->mad.class_version);
+ send_sgl_temp_ipv6->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv6->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv6->mad.resp_bit_method);
+ send_sgl_temp_ipv6->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv6->mad.transaction_id = mad->transaction_id;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
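+
+/*
+ * The IPv4 header checksum computed above is the standard RFC 1071
+ * one's-complement sum over the ten 16-bit words of the 20-byte header
+ * (checksum field pre-zeroed), with the carries folded back in before
+ * inverting, i.e.:
+ *
+ * sum = (sum & 0xFFFF) + (sum >> 16);
+ * sum = (sum & 0xFFFF) + (sum >> 16); folds any remaining carry
+ * csum = ~sum;
+ */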
+
+/**
+ * xrnic_cm_prepare_dreq() - Prepares Disconnection Request Packet
+ * @qp_attr: qp info to be released
+ */
+static void xrnic_cm_prepare_dreq(struct xrnic_qp_attr *qp_attr)
+{
+ struct dreq *dreq;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ dreq->local_cm_id = qp_attr->local_cm_id;
+ dreq->remote_comm_id = qp_attr->remote_cm_id;
+ dreq->remote_qpn_eecn = qp_attr->remote_qpn;
+
+ DEBUG_LOG("Exiting %s %d %d\n",
+ __func__, qp_attr->remote_qpn, dreq->remote_qpn_eecn);
+}
+
+/**
+ * xrnic_cm_disconnect_send_handler() - Sends Disconnection Request and frees
+ * all the attributes related to the qp
+ * @qp_attr: qp info to be released by dreq
+ */
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr)
+{
+ int qp1_send_pkt_size;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+
+ xrnic_cm_prepare_dreq(qp_attr);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_drep() - Prepares disconnect reply packet
+ * @qp_attr: qp info for which drep packet is prepared
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_prepare_drep(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct drep *drep;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Enteing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ drep->local_cm_id = qp_attr->local_cm_id;
+ drep->remote_comm_id = qp_attr->remote_cm_id;
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_request_handler() - Handles Disconnection Request.
+ * @qp_attr: qp info on which the reply is to be sent
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s qp_num %d\n", __func__, qp_attr->qp_num);
+ if (qp_attr->cm_id) {
+ DEBUG_LOG("cm id is not clean qp_num %d\n", qp_attr->qp_num);
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("CM ID is NULL\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ xrnic_cm_prepare_drep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_reply_handler() - Handles disconnect reply packets.
+ * @qp_attr: qp info of which qp to be destroyed
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_reply_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ /* Callback to NVMe-oF. */
+
+ /* TBD: need to change the state while handling with the timer. */
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_reject_handler() - Handles connect reject packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_connect_reject_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct rej *rej;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ rej = (struct rej *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ rej = (struct rej *)&mad->data;
+ }
+
+ if (rej->message_rejected == XRNIC_REJ_REP ||
+ rej->message_rejected == XRNIC_REJ_REQ ||
+ rej->message_rejected == XRNIC_REJ_OTHERS) {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ /* Notify the consumer before the CM ID is cleared below */
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REJ_RECV;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ qp_attr->cm_id = NULL;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_msg_rsp_ack_handler() - Handles message response packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct mra *mra;
+
+ DEBUG_LOG("Enter ing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ mra = (struct mra *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ mra = (struct mra *)&mad->data;
+ }
+
+ if (mra->message_mraed == XRNIC_MRA_REP) {
+ qp_attr->curr_state = XRNIC_MRA_RCVD;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_rep_handler() - handles connect reply packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_rep_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_RCVD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REP_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%#x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_ready_to_use_handler() - handles ready to use packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_ready_to_use_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_ESTABLISHD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_create_child_cm() - creates child cm.
+ * @cm_id_info : to update child cm info after creation
+ */
+static void xrnic_create_child_cm(struct xrnic_rdma_cm_id_info *cm_id_info)
+{
+ struct xrnic_rdma_cm_id *ch_cm;
+
+ ch_cm = kzalloc(sizeof(*ch_cm), GFP_ATOMIC);
+ cm_id_info->child_cm_id = ch_cm;
+}
+
+/**
+ * xrnic_cm_connect_request_handler() - handles connect request packets.
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct mad *mad = NULL;
+ struct req *req = NULL;
+ int qp1_send_pkt_size, child_qp_num, status;
+ enum xrnic_rej_reason reason = XRNIC_REJ_CONSUMER_REJECT;
+ enum xrnic_msg_rej msg_rej;
+ enum xrnic_msg_mra msg_mra;
+ u16 port_num;
+ void *temp;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ struct xrnic_rdma_cm_id *parent_cm_id;
+ struct xrnic_rdma_cm_id_info *child_cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REQ_RCVD;
+
+ DEBUG_LOG("req-> local_cm_resp_tout:%x.\n", req->local_cm_resp_tout);
+ DEBUG_LOG("req-> path_packet_payload_mtu:%x.\n",
+ req->path_packet_payload_mtu);
+ if (req->remote_cm_resp_tout < XRNIC_REQ_REMOTE_CM_RESP_TOUT) {
+ pr_info("remote_cm_resp_tout:%x", req->remote_cm_resp_tout);
+
+ msg_mra = XRNIC_MRA_REQ;
+ xrnic_cm_prepare_mra(qp_attr, msg_mra, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+ qp_attr->curr_state = XRNIC_MRA_SENT;
+ }
+
+ temp = (char *)&req->private_data;
+ temp += 36;
+ port_num = htons(req->service_id[6] | req->service_id[7] << 8);
+ DEBUG_LOG("req-> service_id[0]:%x.\n", req->service_id[0]);
+ DEBUG_LOG("req-> service_id[1]:%x.\n", req->service_id[1]);
+ DEBUG_LOG("req-> service_id[2]:%x.\n", req->service_id[2]);
+ DEBUG_LOG("req-> service_id[3]:%x.\n", req->service_id[3]);
+ DEBUG_LOG("req-> service_id[4]:%x.\n", req->service_id[4]);
+ DEBUG_LOG("req-> service_id[5]:%x.\n", req->service_id[5]);
+ DEBUG_LOG("req-> service_id[6]:%x.\n", req->service_id[6]);
+ DEBUG_LOG("req-> service_id[7]:%x.\n", req->service_id[7]);
+ DEBUG_LOG("req->port_num:%d,%x\n", port_num, port_num);
+
+ /* Validate the port number before indexing port_status */
+ if (port_num < 1 || port_num > XRNIC_MAX_PORT_SUPPORT ||
+ xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_FREE) {
+ pr_err("PORT number is not correct, sending rej.\n");
+ reason = XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ xrnic_create_child_cm(xrnic_dev->cm_id_info[port_num - 1]);
+ child_cm_id = xrnic_dev->cm_id_info[port_num - 1]->child_cm_id;
+ if (!child_cm_id) {
+ reason = XRNIC_REJ_CONSUMER_REJECT;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+ child_qp_num =
+ xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id.child_qp_num++;
+ parent_cm_id = &xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id;
+ child_cm_id->cm_id_info = xrnic_dev->cm_id_info[port_num - 1];
+ child_cm_id->cm_context = parent_cm_id->cm_context;
+ child_cm_id->ps = parent_cm_id->ps;
+ child_cm_id->xrnic_cm_handler = parent_cm_id->xrnic_cm_handler;
+ child_cm_id->local_cm_id = qp_attr->local_cm_id;
+ child_cm_id->port_num = port_num;
+ child_cm_id->child_qp_num = child_qp_num + 1;
+ child_cm_id->qp_info.qp_num = qp_attr->qp_num;
+ child_cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ child_cm_id_info = child_cm_id->cm_id_info;
+ child_cm_id_info->conn_event_info.cm_event = XRNIC_REQ_RCVD;
+ child_cm_id_info->conn_event_info.status = 0;
+ child_cm_id_info->conn_event_info.private_data = (void *)temp;
+ child_cm_id_info->conn_event_info.private_data_len = 32;
+ list_add_tail(&child_cm_id->list, &cm_id_list);
+ status = parent_cm_id->xrnic_cm_handler(child_cm_id,
+ &child_cm_id_info->conn_event_info);
+ if (status) {
+ pr_err("xrnic_cm_handler failed sending rej.\n");
+ reason = XRNIC_REJ_CONSUMER_REJECT;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+ qp_attr->cm_id = child_cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ qp_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv4->deth.src_qp;
+ } else {
+ memcpy(&qp_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv6->deth.src_qp;
+ }
+
+ xrnic_cm_prepare_rep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return;
+send_rep_rej:
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+
+ xrnic_cm_prepare_rej(qp_attr, reason, msg_rej);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s with reject reason [%d]\n", __func__, reason);
+}
+
+/**
+ * fill_cm_rtu_data() - Fills rtu data to send rtu packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_rtu_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct cma_rtu *rtu_data;
+
+ SET_CM_HDR(send_sgl_qp1);
+ rtu_data = (struct cma_rtu *)send_sgl_qp1;
+ memset(rtu_data, 0, sizeof(*rtu_data));
+ rtu_data->local_comm_id = cm_id->local_cm_id;
+ rtu_data->remote_comm_id = cm_id->remote_cm_id;
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_cm_req_data() - Fills request data to send in request packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_req_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ernic_cm_req *cm_req;
+ struct cma_hdr data;
+ int val;
+ int sgid, dgid;
+ unsigned int psn;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_CM_HDR(send_sgl_qp1);
+ cm_req = (struct ernic_cm_req *)send_sgl_qp1;
+ memset(cm_req, 0, sizeof(*cm_req));
+
+ cm_req->local_comm_id = cpu_to_be32(cm_id->local_cm_id);
+ cm_req->service_id = cpu_to_be64((cm_id->ps << 16) |
+ be16_to_cpu(din4->sin_port));
+ ether_addr_copy(&cm_req->local_ca_guid, &cm_id->route.smac);
+ cm_req->local_qkey = 0;
+ cm_req->offset32 = cpu_to_be32((cm_id->local_cm_id << 8) |
+ cm_id->conn_param.responder_resources);
+ cm_req->offset36 = cpu_to_be32(cm_id->conn_param.initiator_depth);
+
+ val = (XRNIC_REQ_LOCAL_CM_RESP_TOUT | (XRNIC_SVC_TYPE_UC << 5) |
+ (cm_id->conn_param.flow_control << 7));
+ cm_req->offset40 = cpu_to_be32(val);
+ get_random_bytes(&psn, sizeof(psn));
+ psn &= 0xFFFFFF;
+ val = ((psn << 8) | XRNIC_REQ_REMOTE_CM_RESP_TOUT |
+ (cm_id->conn_param.retry_count << 5));
+ cm_req->offset44 = cpu_to_be32(val);
+ cm_id->qp_info.starting_psn = psn;
+
+ cm_req->pkey = 0xFFFF;
+ cm_req->offset50 = ((1 << 4) |
+ (cm_id->conn_param.rnr_retry_count << 5));
+ cm_req->offset51 = (1 << 4);
+ cm_req->local_lid = cpu_to_be16(0xFFFF);
+ cm_req->remote_lid = cpu_to_be16(0xFFFF);
+ sgid = sin4->sin_addr.s_addr;
+ dgid = din4->sin_addr.s_addr;
+ val = cpu_to_be32(0xFFFF);
+ memcpy(cm_req->local_gid.raw + 8, &val, 4);
+ memcpy(cm_req->local_gid.raw + 12, &sgid, 4);
+ memcpy(cm_req->remote_gid.raw + 8, &val, 4);
+ memcpy(cm_req->remote_gid.raw + 12, &dgid, 4);
+ cm_req->offset88 = cpu_to_be32(1 << 2);
+ cm_req->traffic_class = 0;
+ cm_req->hop_limit = 0x40;
+ cm_req->offset94 = 0;
+ cm_req->offset95 = 0x18;
+
+ data.cma_version = CMA_VERSION;
+ data.ip_version = (4 << 4);
+ data.port = din4->sin_port;
+ data.src_addr.ip4.addr = sin4->sin_addr.s_addr;
+ data.dst_addr.ip4.addr = din4->sin_addr.s_addr;
+ memcpy(cm_req->private_data, &data, sizeof(data));
+
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_ipv4_cm_req() - fills cm request data for rdma connect.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ */
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_REQ_ATTR_ID);
+ send_sgl_qp1 = fill_cm_req_data(cm_id, send_sgl_qp1, cm_req_size);
+}
+
+/**
+ * xrnic_cm_send_rtu() - Sends the Ready-To-Use (RTU) packet.
+ * @cm_id : CM ID
+ * @cm_rep : IPV4 mad data
+ */
+static void xrnic_cm_send_rtu(struct xrnic_rdma_cm_id *cm_id,
+ struct rep *cm_rep)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) + sizeof(struct cma_rtu) +
+ EXTRA_PKT_LEN;
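+ /* EXTRA_PKT_LEN reserves room for the 4-byte invariant CRC; it is
+ * excluded again from the length handed to xrnic_send_mad().
+ */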
+
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ if (!head)
+ return;
+ send_sgl_qp1 = head;
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_RTU_ATTR_ID);
+ send_sgl_qp1 = fill_cm_rtu_data(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+}
+
+/**
+ * xrnic_rdma_accept() - This function implements incoming connect request
+ * accept functionality.
+ * @cm_id : CM ID of the incoming connect request
+ * @conn_param : Connection parameters
+ * @return: XRNIC_SUCCESS if the connection is accepted successfully,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ struct xrnic_qp_info *qp_info;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] !=
+ XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_info = &cm_id->qp_info;
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ qp_info->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ qp_info->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ qp_info->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE)
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+
+ /* Return an error if an invalid conn_param is passed in. */
+ if (conn_param->private_data_len > XRNIC_CM_PRVATE_DATA_LENGTH ||
+ conn_param->responder_resources > XRNIC_RESPONDER_RESOURCES ||
+ conn_param->initiator_depth > XRNIC_INITIATOR_DEPTH ||
+ conn_param->flow_control > 1 ||
+ conn_param->retry_count > XRNIC_REQ_RETRY_COUNT ||
+ conn_param->rnr_retry_count > XRNIC_REP_RNR_RETRY_COUNT)
+ return -XRNIC_INVALID_QP_CONN_PARAM;
+
+ memcpy((void *)&cm_id->conn_param.private_data,
+ (void *)&conn_param->private_data,
+ conn_param->private_data_len);
+ cm_id->conn_param.private_data_len = conn_param->private_data_len;
+ cm_id->conn_param.responder_resources =
+ conn_param->responder_resources;
+ cm_id->conn_param.initiator_depth = conn_param->initiator_depth;
+ cm_id->conn_param.flow_control = conn_param->flow_control;
+ cm_id->conn_param.retry_count = conn_param->retry_count;
+ cm_id->conn_param.rnr_retry_count = conn_param->rnr_retry_count;
+
+ xrnic_qp_app_configuration(qp_info->qp_num, XRNIC_HW_QP_ENABLE);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_accept);
+
+/**
+ * xrnic_rdma_disconnect() - This function implements RDMA disconnect.
+ * @cm_id : CM ID to destroy or disconnect
+ * @return: XRNIC_SUCCESS on successful disconnect,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (cm_id->local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_info.qp_num) {
+ pr_err("CM ID of QP is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else if (cm_id->local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE){
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ pr_err("Received invalid Port ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_disconnect);
+
+/**
+ * xrnic_rdma_destroy_id() - Function destroys the CM ID of the channel.
+ * @cm_id : CM ID of the incoming connect request
+ * @flag : Flag to indicate whether to send a disconnect
+ * @return: XRNIC_SUCCESS on success,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+ u32 local_cm_id = cm_id->local_cm_id;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (flag)
+ xrnic_cm_disconnect_send_handler
+ (&xrnic_dev->qp_attr[local_cm_id - 2]);
+
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+
+ cm_id_info =
+ xrnic_dev->cm_id_info[cm_id->port_num - 1];
+ cm_id_info->parent_cm_id.child_qp_num--;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id);
+ } else if (local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE) {
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ xrnic_dev->io_qp_count = xrnic_dev->io_qp_count +
+ cm_id_info->num_child;
+ xrnic_dev->cm_id_info[cm_id->port_num - 1] = NULL;
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_FREE;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id_info->child_cm_id);
+ kfree(cm_id_info);
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ return -XRNIC_INVALID_CM_ID;
+ }
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_destroy_id);
+
+/**
+ * xrnic_send_mad() - This function initiates sending a management packet on
+ * QP1.
+ * @send_buf : Input buffer to fill
+ * @size : Size of the send buffer
+ */
+void xrnic_send_mad(void *send_buf, u32 size)
+{
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+
+ xrnic_qp1_send_mad_pkt(send_buf, qp1_attr, size);
+}
+EXPORT_SYMBOL(xrnic_send_mad);
+
+/**
+ * xrnic_identify_remote_host() - This function searches internal data
+ * structures for remote host info.
+ * @rq_buf : received data buffer from other end
+ * @qp_num : QP number on which packet has been received
+ * @return: XRNIC_SUCCESS if remote end info is available,
+ * XRNIC_FAILED otherwise
+ */
+int xrnic_identify_remote_host(void *rq_buf, int qp_num)
+{
+ /* First find which IP version the packet came in on and
+ * accordingly compare the IP address as either AF_INET or AF_INET6.
+ */
+ /* There are two possible failure conditions: either we just bypass
+ * this CONNECT_REQUEST because we already have it, or there
+ * is no free QP at all.
+ */
+ struct mad *mad;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ }
+
+ if (htons(mad->attribute_id) == CONNECT_REQUEST) {
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+ return XRNIC_SUCCESS;
+ }
+ } else {
+ /* Need to compare udp->source_port, ethernet->source_mac,
+ * ip->source_ip, deth->source_qp == 1; local_cm_id is LE.
+ */
+
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+
+ return XRNIC_SUCCESS;
+ }
+ }
+ return XRNIC_FAILED;
+}
+
+/**
+ * xrnic_rdma_resolve_addr() - This function resolves the destination
+ * address and initiates ARP if required.
+ * @cm_id : CM channel ID which is being used for connection set up
+ * @src_addr : IPV4/IPV6 address of the source
+ * @dst_addr : IPV4/IPV6 address of the destination
+ * @timeout : Address resolve timeout
+ * @return: SUCCESS value if route resolved or error representative value
+ * otherwise
+ */
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout)
+{
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct neighbour *n;
+ int arp_retry = 3;
+ int ret = 0;
+ struct sockaddr_in sin4, *din4;
+ struct net_device *net_dev;
+ struct xrnic_rdma_cm_event_info event;
+
+ net_dev = dev_get_by_name(&init_net, "eth0");
+ if (!net_dev)
+ return -ENODEV;
+ memset(&fl4, 0, sizeof(fl4));
+ din4 = (struct sockaddr_in *)dst_addr;
+ fl4.daddr = din4->sin_addr.s_addr;
+ rt = ip_route_output_key(&init_net, &fl4);
+ if (IS_ERR(rt)) {
+ event.cm_event = XRNIC_CM_EVENT_ADDR_ERROR;
+ event.status = PTR_ERR(rt);
+ cm_id->xrnic_cm_handler(cm_id, &event);
+ ret = PTR_ERR(rt);
+ goto err;
+ }
+
+ event.cm_event = XRNIC_CM_EVENT_ADDR_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+
+ sin4.sin_addr.s_addr = fl4.saddr;
+ sin4.sin_port = cpu_to_be16(ERNIC_UDP_SRC_PORT);
+ sin4.sin_family = dst_addr->sa_family;
+
+ /* HACK: ARP is not resolved for the first time, retries are needed */
+ do {
+ n = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+ } while (arp_retry-- > 0);
+
+ if (IS_ERR(n)) {
+ pr_err("ERNIC neigh lookup failed\n");
+ ret = PTR_ERR(n);
+ goto err;
+ }
+
+ memcpy(&cm_id->route.s_addr, &sin4, sizeof(sin4));
+ memcpy(&cm_id->route.d_addr, dst_addr, sizeof(*dst_addr));
+ ether_addr_copy(cm_id->route.smac, net_dev->dev_addr);
+ ether_addr_copy(cm_id->route.dmac, n->ha);
+ event.cm_event = XRNIC_CM_EVENT_ROUTE_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+err:
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_rdma_resolve_addr);
+
+/**
+ * fill_ipv4_headers() - This function fills the Ethernet, IPv4 and UDP
+ * headers for an outgoing packet.
+ * @cm_id : CM ID info for addresses
+ * @send_sgl_qp1 : SGL info
+ * @cm_req_size : request size
+ * @return: pointer to SGL info
+ */
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_ETH_HDR(send_sgl_qp1);
+ eth = (struct ethhdr *)send_sgl_qp1;
+ ether_addr_copy(eth->h_dest, cm_id->route.dmac);
+ ether_addr_copy(eth->h_source, cm_id->route.smac);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ SET_IP_HDR(send_sgl_qp1);
+ iph = (struct iphdr *)send_sgl_qp1;
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 32;
+ iph->tos = 0;
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = sin4->sin_addr.s_addr;
+ iph->daddr = din4->sin_addr.s_addr;
+ iph->id = 0;
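+ /* 0x2 << 13 (0x4000) sets the DF (Don't Fragment) flag. */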
+ iph->frag_off = cpu_to_be16(0x2 << 13);
+ iph->tot_len = cpu_to_be16(cm_req_size - ETH_HLEN);
+
+ ip_send_check(iph);
+
+ SET_NET_HDR(send_sgl_qp1);
+ udph = (struct udphdr *)send_sgl_qp1;
+ udph->source = sin4->sin_port;
+ udph->dest = din4->sin_port;
+ udph->len = cpu_to_be16(cm_req_size - ETH_HLEN - (iph->ihl * 4));
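+ /* A zero UDP checksum means "no checksum" for IPv4 (RFC 768). */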
+ udph->check = 0;
+
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_mad_common_header() - This function fills the MAD headers.
+ * @cm_id : CM ID info
+ * @send_sgl_qp1 : SGL info
+ * @cm_req_size : request size
+ * @cm_attr : cm attribute ID
+ * @return: pointer to SGL info
+ */
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr)
+{
+ struct ib_bth *bth;
+ struct ib_deth *deth;
+ struct ib_mad_hdr *madh;
+ int val;
+
+ SET_BTH_HDR(send_sgl_qp1);
+ bth = (struct ib_bth *)send_sgl_qp1;
+ memset(bth, 0, sizeof(*bth));
+ val = (BTH_SET(OPCODE, IB_OPCODE_UD_SEND_ONLY) |
+ BTH_SET(SE, XRNIC_SET_SOLICT_EVENT) |
+ BTH_SET(MIG, XRNIC_MIGRATION_REQ) |
+ BTH_SET(PAD, XRNIC_PAD_COUNT) |
+ BTH_SET(TVER, XRNIC_TRANSPORT_HDR_VER) |
+ BTH_SET(PKEY, 65535));
+ bth->offset0 = cpu_to_be32(val);
+ bth->offset4 = cpu_to_be32(BTH_SET(DEST_QP, 1));
+ bth->offset8 = cpu_to_be32(BTH_SET(PSN, psn_num++));
+
+ SET_DETH_HDR(send_sgl_qp1);
+ deth = (struct ib_deth *)send_sgl_qp1;
+ deth->offset0 = cpu_to_be32(IB_ENFORCED_QEY);
+ deth->offset4 = cpu_to_be32(DETH_SET(SQP, 2));
+
+ SET_MAD_HDR(send_sgl_qp1);
+ madh = (struct ib_mad_hdr *)send_sgl_qp1;
+ memset(madh, 0, sizeof(*madh));
+ madh->base_version = IB_MGMT_BASE_VERSION;
+ madh->mgmt_class = IB_MGMT_CLASS_CM;
+ madh->class_version = IB_CM_CLASS_VER;
+ madh->method = IB_MGMT_METHOD_SEND;
+ madh->attr_id = cm_attr;
+ madh->tid = cpu_to_be64(mad_tid++);
+ madh->status = 0;
+ madh->class_specific = 0;
+ madh->attr_mod = 0;
+
+ return send_sgl_qp1;
+}
+
+/**
+ * xrnic_rdma_connect() - This function initiates the connection process.
+ * @cm_id : CM ID info
+ * @conn_param : Connection parameters for the new connection
+ * @return: XRNIC_SUCCESS on success, -ENOMEM on allocation failure
+ */
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) +
+ sizeof(struct ernic_cm_req) + EXTRA_PKT_LEN;
+
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ if (!head)
+ return -ENOMEM;
+ send_sgl_qp1 = head;
+ memcpy(&cm_id->conn_param, conn_param, sizeof(*conn_param));
+ fill_ipv4_cm_req(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_connect);
+
+/**
+ * xrnic_process_mad_pkt() - This function processes a received MAD packet.
+ * @rq_buf : receive queue pointer
+ * @return: XRNIC_SUCCESS if the MAD packet is processed successfully,
+ * XRNIC_FAILED otherwise
+ */
+static int xrnic_process_mad_pkt(void *rq_buf)
+{
+ int ret = 0;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct deth *deth;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ qp1_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv6->deth;
+ memcpy(&qp1_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source,
+ XRNIC_ETH_ALEN);
+ }
+ qp1_attr->source_qp_num = deth->src_qp;
+
+ ret = xrnic_cm_establishment_handler(rq_buf);
+ if (ret) {
+ pr_err("cm establishment failed with ret code %d\n", ret);
+ return XRNIC_FAILED;
+ }
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_mad_pkt_recv_intr_handler() - Interrupt handler for MAD packet
+ * interrupt type
+ * @data : XRNIC device info
+ */
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp1_attr->xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr = (struct rdma_qp1_attr *)
+ &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+ u32 config_value = 0;
+ u8 rq_buf[XRNIC_RECV_PKT_SIZE];
+ void *rq_buf_temp, *rq_buf_unaligned;
+ int ret = 0, j, rq_pkt_num = 0, rq_pkt_count = 0;
+ struct ethhdr_t *ethhdr;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ rq_buf_unaligned = (void *)rq_buf;
+
+ /* We need to maintain sq_cmpl_db_local as per the hardware update
+ * for the queue-specific sq_cmpl_db_local register.
+ * Also, in case some packets are resent, we
+ * need to maintain this variable.
+ */
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+ pr_info("config_value = %d, db_local = %d\n",
+ config_value, qp1_attr->rq_wrptr_db_local);
+ if (qp1_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return;
+ }
+
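+ /* The hardware write pointer wraps at XRNIC_RQ_DEPTH; if the local
+ * copy is ahead of it, the hardware pointer has wrapped, so add the
+ * depth to get the number of outstanding packets.
+ */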
+ if (qp1_attr->rq_wrptr_db_local > config_value)
+ rq_pkt_count = (config_value + XRNIC_RQ_DEPTH) -
+ qp1_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count = config_value - qp1_attr->rq_wrptr_db_local;
+
+ DEBUG_LOG("rx pkt count = 0x%x\n", rq_pkt_count);
+ for (j = 0 ; j < rq_pkt_count ; j++) {
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+
+ rq_pkt_num = qp1_attr->rq_wrptr_db_local;
+ if (rq_pkt_num >= XRNIC_RQ_DEPTH)
+ rq_pkt_num = rq_pkt_num - XRNIC_RQ_DEPTH;
+
+ ethhdr = (struct ethhdr_t *)((char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE));
+
+ if (ethhdr->eth_type == htons(XRNIC_ETH_P_IP)) {
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ qp1_attr->ip_addr_type = AF_INET;
+ } else {
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ qp1_attr->ip_addr_type = AF_INET6;
+ }
+ ret = xrnic_process_mad_pkt(rq_buf_unaligned);
+
+ if (ret) {
+ DEBUG_LOG("MAD pkt processing failed for pkt num %d\n",
+ rq_pkt_num);
+ }
+
+ qp1_attr->rq_wrptr_db_local = qp1_attr->rq_wrptr_db_local + 1;
+ config_value = qp1_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_ci_db)));
+
+ if (qp1_attr->rq_wrptr_db_local == XRNIC_RQ_DEPTH)
+ qp1_attr->rq_wrptr_db_local = 0;
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_cm_establishment_handler() - handles the connection establishment
+ * state machine events.
+ * @rq_buf : receive queue buffer
+ * @return: 0 on success, -1 in case of failure
+ */
+int xrnic_cm_establishment_handler(void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct req *req;
+ struct rep *rep;
+ struct deth *deth;
+ struct xrnic_qp_attr *qp_attr;
+ int i = 0, ret;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id *cm_id, *tmp;
+ struct sockaddr_in *din4;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+ switch (htons(mad->attribute_id)) {
+ case CONNECT_REQUEST:
+ DEBUG_LOG("Connect request recevied\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ ret = xrnic_find_free_qp();
+ DEBUG_LOG("Q pair no:%x, i = %d\n", ret, i);
+ if (ret < 0) {
+ qp_attr = qp1_attr;
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ pr_err("no QP is free for connection.\n");
+ reason = XRNIC_REJ_NO_QP_AVAILABLE;
+ msg = XRNIC_REJ_REQ;
+ qp_attr->remote_cm_id = req->local_cm_id;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ return XRNIC_FAILED;
+ }
+ i = ret;
+ }
+
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_LISTEN ||
+ qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REJ_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_ESTABLISHD) {
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state for Connect Request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case READY_TO_USE:
+ DEBUG_LOG("RTU received\n");
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection. in RTU\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_ready_to_use_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve RTU\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case MSG_RSP_ACK:
+ DEBUG_LOG("Message received Ack interrupt\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_msg_rsp_ack_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve MSG RSP ACK\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case CONNECT_REPLY:
+ DEBUG_LOG("Connect reply received\n");
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ rep = (struct rep *)&recv_qp_pkt_ipv4->mad.data;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ list_for_each_entry_safe(cm_id, tmp, &cm_id_list, list) {
+ if (cm_id->local_cm_id ==
+ be32_to_cpu(rep->remote_comm_id))
+ break;
+ }
+ /* Something is wrong if the qp num is 0. Don't send the reply.
+ * TODO: Send a Reject instead of silently dropping the reply.
+ */
+ if (cm_id->qp_info.qp_num == 0)
+ goto done;
+ cm_id->local_cm_id = rep->remote_comm_id;
+ cm_id->remote_cm_id = rep->local_cm_id;
+ qp_attr = &xrnic_dev->qp_attr[(cm_id->qp_info.qp_num - 2)];
+ qp_attr->local_cm_id = rep->remote_comm_id;
+ qp_attr->remote_cm_id = rep->local_cm_id;
+ qp_attr->remote_qp = (be32_to_cpu(rep->local_qpn) >> 8);
+ qp_attr->source_qp_num = (deth->src_qp);
+ qp_attr->starting_psn = (cm_id->qp_info.starting_psn - 1);
+ qp_attr->rem_starting_psn = (rep->start_psn[2] |
+ rep->start_psn[1] << 8 |
+ rep->start_psn[0] << 16);
+ ether_addr_copy(qp_attr->mac_addr, cm_id->route.dmac);
+ din4 = &cm_id->route.d_addr;
+ cm_id->port_num = be16_to_cpu(din4->sin_port);
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_IN_USE;
+ qp_attr->ipv4_addr = din4->sin_addr.s_addr;
+ qp_attr->ip_addr_type = AF_INET;
+ qp_attr->cm_id = cm_id;
+ xrnic_qp_app_configuration(cm_id->qp_info.qp_num,
+ XRNIC_HW_QP_ENABLE);
+ xrnic_cm_connect_rep_handler(qp_attr, NULL);
+ xrnic_cm_send_rtu(cm_id, rep);
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+done:
+ break;
+
+ case CONNECT_REJECT:
+ DEBUG_LOG("Connect Reject received\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_reject_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve connect reject\n");
+ return XRNIC_FAILED;
+ }
+
+ break;
+
+ case DISCONNECT_REQUEST:
+ DEBUG_LOG("Disconnect request received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QPis free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_ESTABLISHD ||
+ qp_attr->curr_state == XRNIC_DREQ_SENT ||
+ qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case DISCONNECT_REPLY:
+ DEBUG_LOG("Disconnect reply received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_reply_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect reply\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case SERVICE_ID_RESOLUTION_REQ:
+ DEBUG_LOG("Received service ID resolution request\n");
+ pr_err("Not handling service ID resolution request\n");
+ return XRNIC_FAILED;
+
+ case SERVICE_ID_RESOLUTION_REQ_REPLY:
+ DEBUG_LOG("Received service ID resolution reply\n");
+ pr_err("Not handling service ID resolution reply\n");
+ return XRNIC_FAILED;
+
+ case LOAD_ALTERNATE_PATH:
+ DEBUG_LOG("Received Load Alternate Path request\n");
+ pr_err("Not handling Load Alternate Path request\n");
+ return XRNIC_FAILED;
+
+ case ALTERNATE_PATH_RESPONSE:
+ DEBUG_LOG("Received LAP response\n");
+ pr_err("Not handling LAP response\n");
+ return XRNIC_FAILED;
+
+ default:
+ pr_err("default mad attribute 0x%x\n", mad->attribute_id);
+ break;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return XRNIC_SUCCESS;
+}
diff --git a/drivers/staging/xlnx_ernic/xcm.h b/drivers/staging/xlnx_ernic/xcm.h
new file mode 100644
index 000000000000..6640b83e5166
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _CM_H
+#define _CM_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cm.h>
+
+/************************** Constant Definitions *****************************/
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+/* As per RoCEv2 Annex A17, the UDP source port can be fixed to avoid
+ * ordering issues. So, to keep things simple, ERNIC also uses a constant
+ * UDP source port.
+ */
+#define ERNIC_UDP_SRC_PORT 0xA000
+
+#define SET_VAL(start, size, val) ((((val) & ((1U << (size)) - 1)) << (start)))
+#define GET_VAL(start, size, val) (((val) >> (start)) & ((1U << (size)) - 1))
+#define BTH_SET(FIELD, v) SET_VAL(BTH_##FIELD##_OFF, \
+ BTH_##FIELD##_SZ, v)
+#define DETH_SET(FIELD, v) SET_VAL(DETH_##FIELD##_OFF, \
+ DETH_##FIELD##_SZ, v)
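+/*
+ * Example: BTH_SET(OPCODE, v) expands to SET_VAL(BTH_OPCODE_OFF,
+ * BTH_OPCODE_SZ, v), i.e. ((v) & 0xFF) << 24, which places the opcode
+ * in bits [31:24] of the first BTH word.
+ */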
+
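+/* The SET_*_HDR macros advance the SGL pointer past the previous
+ * header, walking Ethernet -> IP -> UDP -> BTH -> DETH -> MAD as each
+ * header is filled in.
+ */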
+#define SET_HDR_OFFSET(ptr, off) ((ptr) += (off))
+#define SET_CM_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ib_mad_hdr))
+#define SET_ETH_HDR(ptr) SET_HDR_OFFSET(ptr, 0)
+#define SET_IP_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ethhdr))
+#define SET_NET_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct iphdr))
+#define SET_BTH_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct udphdr))
+#define SET_DETH_HDR(ptr) SET_HDR_OFFSET(ptr, IB_BTH_BYTES)
+#define SET_MAD_HDR(ptr) SET_HDR_OFFSET(ptr, IB_DETH_BYTES)
+
+#define CMA_VERSION 0
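+/* Well-known GSI Q_Key (0x80010000) used for QP1 traffic. */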
+#define IB_ENFORCED_QEY 0x80010000
+#define IB_CM_CLASS_VER 2
+/*****************************************************************************/
+struct ib_bth {
+ __be32 offset0;
+#define BTH_PKEY_OFF 0
+#define BTH_PKEY_SZ 16
+#define BTH_TVER_OFF 16
+#define BTH_TVER_SZ 4
+#define BTH_PAD_OFF 20
+#define BTH_PAD_SZ 2
+#define BTH_MIG_OFF 22
+#define BTH_MIG_SZ 1
+#define BTH_SE_OFF 23
+#define BTH_SE_SZ 1
+#define BTH_OPCODE_OFF 24
+#define BTH_OPCODE_SZ 8
+ __be32 offset4;
+#define BTH_DEST_QP_OFF 0
+#define BTH_DEST_QP_SZ 24
+ __be32 offset8;
+#define BTH_PSN_OFF 0
+#define BTH_PSN_SZ 24
+#define BTH_ACK_OFF 31
+#define BTH_ACK_SZ 1
+};
+
+struct ib_deth {
+ __be32 offset0;
+#define DETH_QKEY_OFF 0
+#define DETH_QKEY_SZ 32
+ __be32 offset4;
+#define DETH_SQP_OFF 0
+#define DETH_SQP_SZ 24
+};
+
+struct cma_rtu {
+ u32 local_comm_id;
+ u32 remote_comm_id;
+ u8 private_data[224];
+};
+
+union cma_ip_addr {
+ struct in6_addr ip6;
+ struct {
+ __be32 pad[3];
+ __be32 addr;
+ } ip4;
+};
+
+/* CA11-1: IP Addressing CM REQ Message Private Data Format */
+struct cma_hdr {
+ u8 cma_version;
+ u8 ip_version; /* IP version: 7:4 */
+ __be16 port;
+ union cma_ip_addr src_addr;
+ union cma_ip_addr dst_addr;
+};
+
+enum transport_svc_type {
+ XRNIC_SVC_TYPE_RC = 0,
+ XRNIC_SVC_TYPE_UC,
+ XRNIC_SVC_TYPE_RD,
+ XRNIC_SVC_TYPE_RSVD,
+};
+
+extern struct list_head cm_id_list;
+
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size);
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr);
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+int xrnic_cm_establishment_handler(void *rq_buf);
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr);
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf);
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf);
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr);
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason,
+ enum xrnic_msg_rej msg);
+void xrnic_send_mad(void *send_buf, u32 size);
+int xrnic_identify_remote_host(void *rq_buf, int qp_num);
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data);
+
+struct ernic_cm_req {
+ u32 local_comm_id;
+ u32 rsvd1;
+ __u64 service_id;
+ __u64 local_ca_guid;
+ u32 rsvd2;
+ u32 local_qkey;
+ u32 offset32;
+ u32 offset36;
+ u32 offset40;
+ u32 offset44;
+ u16 pkey;
+ u8 offset50;
+ u8 offset51;
+ u16 local_lid;
+ u16 remote_lid;
+ union ib_gid local_gid;
+ union ib_gid remote_gid;
+ u32 offset88;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 offset94;
+ u8 offset95;
+ u8 rsvd3[45];
+ u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+} __packed;
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _CM_H*/
diff --git a/drivers/staging/xlnx_ernic/xcommon.h b/drivers/staging/xlnx_ernic/xcommon.h
new file mode 100644
index 000000000000..c7d9ff6c84b6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcommon.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef COMMOM_INCL_H
+#define COMMOM_INCL_H
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include "xif.h"
+#include "xrocev2.h"
+#include "xhw_def.h"
+#include "xqp.h"
+#include "xcm.h"
+#include "xmr.h"
+#include "xmain.h"
+
+#define XRNIC_FAILED -1
+#define XRNIC_SUCCESS 0
+#define DEBUG_LOG(x, ...) do { \
+ if (debug)\
+ pr_info(x, ##__VA_ARGS__); \
+ } while (0)
+
+extern int debug;
+
+struct xrnic_dev_info {
+ struct xrnic_memory_map xrnic_mmap;
+ struct xrnic_qp_attr qp1_attr;
+ /* TODO: Need to allocate qp_attr on heap.
+ * when max Queue Pairs increases in the design, static memory
+ * requirement will be huge.
+ */
+ struct xrnic_qp_attr qp_attr[XRNIC_MAX_QP_SUPPORT];
+ /* DESTINATION ADDR_FAMILY - IPv4/V6 */
+ u16 ip_addr_type;
+ /* DESTINATION addr in NBO */
+ u8 ipv6_addr[16];
+ u32 pmtu;
+ /* IPV4 address */
+ u8 ipv4_addr[4];
+ u32 qp_falat_local_ptr;
+ struct xrnic_rdma_cm_id_info *curr_cm_id_info;
+ /* TODO: Need to allocate cm_id_info and port_status on heap. */
+ struct xrnic_rdma_cm_id_info *cm_id_info[XRNIC_MAX_PORT_SUPPORT];
+ enum xrnic_port_qp_status port_status[XRNIC_MAX_PORT_SUPPORT];
+ /* Interrupt for RNIC */
+ u32 xrnic_irq;
+ struct tasklet_struct mad_pkt_recv_task;
+ struct tasklet_struct qp_pkt_recv_task;
+ struct tasklet_struct qp_fatal_task;
+ struct tasklet_struct wqe_completed_task;
+ u32 io_qp_count;
+ /*Character Driver Interface*/
+ struct device_node *dev_node;
+ struct resource resource;
+ struct cdev cdev;
+ char pkt_buffer[512];
+ struct device *dev;
+};
+
+extern struct xrnic_dev_info *xrnic_dev;
+#ifdef __cplusplus
+ }
+#endif
+#endif
diff --git a/drivers/staging/xlnx_ernic/xernic_bw_test.c b/drivers/staging/xlnx_ernic/xernic_bw_test.c
new file mode 100644
index 000000000000..0f0977660621
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xernic_bw_test.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC perftest driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <net/addrconf.h>
+#include "xcommon.h"
+#include "xperftest.h"
+
+/* Default Port Number for Perftest and Depths for XRNIC */
+#define PERFTEST_PORT 18515
+#define PERFTEST_SQ_DEPTH 0x80
+#define PERFTEST_RQ_DEPTH 0x40
+/* Admin and IO QPs */
+#define PERFTEST_ADMIN_QPS 1
+#define PERFTEST_IO_QPS 1
+#define PERFTEST_MAX_QPS (PERFTEST_ADMIN_QPS + PERFTEST_IO_QPS)
+#define PERFTEST_DEFAULT_MEM_SIZE (4 * 1024 * 1024)
+
+#define _1MB_BUF_SIZ (1024 * 1024)
+#define PERF_TEST_RQ_BUF_SIZ ((_1MB_BUF_SIZ + XRNIC_RECV_PKT_SIZE) *\
+ PERFTEST_RQ_DEPTH)
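+/* The RQ buffer region must hold both the admin QP's 512B slots and an
+ * IO QP's 1MB slots for the full RQ depth.
+ */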
+
+struct xrnic_rdma_cm_id *cm_id;
+static char server_ip[32] = "0.0.0.0";
+struct ernic_pd *pd;
+int prev_qpn;
+
+/* TODO: currently, we have a single instance.
+ * Need to convert this to a per-instance context.
+ */
+struct perftest_ctx {
+ struct xrnic_rdma_cm_id *cm_id;
+ struct ernic_pd *pd;
+ struct mr *reg_mr; /*registered MR */
+};
+
+phys_addr_t phys_mem[PERFTEST_MAX_QPS];
+int io_mr_idx;
+struct mr *perftest_io_mr[PERFTEST_IO_QPS];
+
+struct perftest_ctx perf_context[PERFTEST_MAX_QPS];
+
+struct perftest_wr {
+ union ctx ctx;
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+struct xrnic_qp_init_attr qp_attr;
+
+struct perftest_trinfo {
+ phys_addr_t rq_buf_ba_phys;
+ phys_addr_t send_sgl_phys;
+ phys_addr_t sq_ba_phys;
+ phys_addr_t cq_ba_phys;
+ phys_addr_t rq_wptr_db_phys;
+ phys_addr_t sq_cmpl_db_phys;
+ void __iomem *rq_buf_ba;
+ void __iomem *send_sgl;
+ void __iomem *sq_ba;
+ void __iomem *cq_ba;
+};
+
+struct perftest_trinfo trinfo;
+struct xrnic_rdma_conn_param conn_param;
+int rq_ci_db, sq_cmpl_db;
+
+int port = -1;
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+module_param(port, int, 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
+
+/**
+ * perftest_parse_addr() - Parses the input IP address string.
+ * @s_addr: IP address structure to fill.
+ * @buf: Input IP address string.
+ * @return: 0 if the address parses as either IPv4 or IPv6,
+ * else returns -EINVAL.
+ */
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf)
+{
+ size_t buflen = strlen(buf);
+ int ret;
+ const char *delim;
+
+ if (buflen <= INET_ADDRSTRLEN) {
+ struct sockaddr_in *sin_addr = (struct sockaddr_in *)s_addr;
+
+ ret = in4_pton(buf, buflen, (u8 *)&sin_addr->sin_addr.s_addr,
+ '\0', NULL);
+ if (!ret)
+ goto fail;
+
+ sin_addr->sin_family = AF_INET;
+ return 0;
+ }
+ if (buflen <= INET6_ADDRSTRLEN) {
+ struct sockaddr_in6 *sin6_addr = (struct sockaddr_in6 *)s_addr;
+
+ ret = in6_pton(buf, buflen,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ -1, &delim);
+ if (!ret)
+ goto fail;
+
+ sin6_addr->sin6_family = AF_INET6;
+ return 0;
+ }
+fail:
+ return -EINVAL;
+}
+
+/**
+ * rq_handler() - receive packet callback routine.
+ * @rq_count: Rx packet count.
+ * @rq_context: context info.
+ */
+void rq_handler(u32 rq_count, void *rq_context)
+{
+ int i, qp_num, offset;
+ struct ernic_bwtest_struct *rq_buf;
+ struct xrnic_rdma_cm_id *cm_id;
+ struct perftest_wr *sq_wr;
+ struct mr *mem;
+ struct perftest_ctx *ctx;
+
+ ctx = (struct perftest_ctx *)rq_context;
+ cm_id = ctx->cm_id;
+ qp_num = cm_id->child_qp_num;
+ offset = sq_cmpl_db * XRNIC_SEND_SGL_SIZE;
+ for (i = 0; i < rq_count; i++) {
+ if (qp_num == 1) {
+ rq_buf = (struct ernic_bwtest_struct *)
+ ((char *)cm_id->qp_info.rq_buf_ba_ca +
+ ((qp_num - 1) * rq_ci_db *
+ XRNIC_RECV_PKT_SIZE));
+ if (io_mr_idx >= PERFTEST_IO_QPS)
+ goto done;
+ mem = perftest_io_mr[io_mr_idx];
+
+ rq_buf->rkey = htonl((unsigned int)mem->rkey);
+ rq_buf->vaddr = cpu_to_be64(mem->vaddr);
+
+ memcpy((u8 *)(trinfo.send_sgl + offset),
+ (u8 *)rq_buf,
+ sizeof(struct ernic_bwtest_struct));
+
+ sq_wr = (struct perftest_wr *)trinfo.sq_ba +
+ sq_cmpl_db;
+ sq_wr->ctx.wr_id = sq_cmpl_db;
+ sq_wr->length = sizeof(struct ernic_bwtest_struct);
+ sq_wr->remote_tag = ntohl(0xDEAD);
+ sq_wr->local_offset[0] = trinfo.send_sgl_phys + offset;
+ sq_wr->local_offset[1] = 0;
+
+ sq_wr->remote_offset[0] = 0x12345678;
+ sq_wr->remote_offset[1] = 0xABCDABCD;
+ sq_wr->completion_info[0] = htonl(0x11111111);
+ sq_wr->completion_info[1] = htonl(0x22222222);
+ sq_wr->completion_info[2] = htonl(0x33333333);
+ sq_wr->completion_info[3] = htonl(0x44444444);
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+ xrnic_post_recv(&cm_id->qp_info, 1);
+ if (qp_num == 1) {
+ xrnic_post_send(&cm_id->qp_info, 1);
+ if (prev_qpn != rq_buf->qp_number) {
+ if (prev_qpn != 0)
+ io_mr_idx++;
+ prev_qpn = rq_buf->qp_number;
+ }
+ }
+
+done:
+ rq_ci_db++;
+
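+ /* Wrap the RQ consumer index early, presumably to keep ~20
+ * entries of headroom before the hardware producer catches up.
+ */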
+ if (rq_ci_db >= (PERFTEST_RQ_DEPTH - 20))
+ rq_ci_db = 0;
+ if (qp_num == 1) {
+ sq_cmpl_db++;
+ if (sq_cmpl_db >= PERFTEST_SQ_DEPTH)
+ sq_cmpl_db = 0;
+ }
+ }
+}
+
+/**
+ * sq_handler() - completion call back.
+ * @sq_count: Tx packet count.
+ * @sq_context: context info.
+ */
+void sq_handler(u32 sq_count, void *sq_context)
+{
+/* TODO: This function is just a place holder for now.
+ * This function should handle completions for outgoing
+ * RDMA_SEND, RDMA_READ and RDMA_WRITE.
+ */
+ pr_info("XLNX[%d:%s]\n", __LINE__, __func__);
+}
+
+/**
+ * perftest_fill_wr() - Fills work requests at the send queue base address.
+ * @sq_ba: send queue base address of the QP.
+ */
+void perftest_fill_wr(void __iomem *sq_ba)
+{
+ struct perftest_wr *sq_wr;
+ int i;
+
+ for (i = 0; i < XRNIC_SQ_DEPTH; i++) {
+ sq_wr = (struct perftest_wr *)sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->length = 16;
+ sq_wr->completion_info[0] = 0xAAAAAAAA;
+ sq_wr->completion_info[1] = 0xBBBBBBBB;
+ sq_wr->completion_info[2] = 0xCCCCCCCC;
+ sq_wr->completion_info[3] = 0xDDDDDDDD;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+}
+
+/**
+ * perftest_cm_handler() - CM handler call back routine.
+ * @cm_id: CM ID on which event received.
+ * @conn_event: Event information on the CM.
+ * @return: 0 on success or error code on failure.
+ */
+static int perftest_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event)
+{
+ int qp_num, per_qp_size;
+ struct perftest_ctx *ctx;
+
+ qp_num = cm_id->child_qp_num;
+ memset(&qp_attr, 0, sizeof(struct xrnic_qp_init_attr));
+ ctx = &perf_context[qp_num - 1];
+ switch (conn_event->cm_event) {
+ case XRNIC_REQ_RCVD:
+ qp_attr.xrnic_rq_event_handler = rq_handler;
+ qp_attr.xrnic_sq_event_handler = sq_handler;
+ qp_attr.qp_type = XRNIC_QPT_RC;
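+ /* QP1 (the admin QP) uses 512B receive buffers; IO QPs use 1MB
+ * buffers, each carved out of the shared RQ buffer region.
+ */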
+ if (qp_num > 1) {
+ qp_attr.recv_pkt_size = _1MB_BUF_SIZ;
+ per_qp_size = (qp_num - 2) * _1MB_BUF_SIZ *
+ PERFTEST_RQ_DEPTH + XRNIC_RECV_PKT_SIZE *
+ PERFTEST_RQ_DEPTH;
+ } else {
+ qp_attr.recv_pkt_size = XRNIC_RECV_PKT_SIZE;
+ per_qp_size = 0;
+ }
+ qp_attr.rq_buf_ba_ca_phys = trinfo.rq_buf_ba_phys +
+ per_qp_size;
+ qp_attr.rq_buf_ba_ca = (char *)trinfo.rq_buf_ba +
+ per_qp_size;
+ per_qp_size = (qp_num - 1) * sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH;
+ qp_attr.sq_ba_phys = trinfo.sq_ba_phys + per_qp_size;
+ qp_attr.sq_ba = (char *)trinfo.sq_ba + per_qp_size;
+ per_qp_size = (qp_num - 1) * (PERFTEST_SQ_DEPTH * 4);
+ qp_attr.cq_ba_phys = trinfo.cq_ba_phys + per_qp_size;
+ qp_attr.cq_ba = (char *)trinfo.cq_ba + per_qp_size;
+ qp_attr.rq_context = ctx;
+ qp_attr.sq_context = ctx;
+ ctx->cm_id = cm_id;
+ qp_attr.sq_depth = PERFTEST_SQ_DEPTH;
+ qp_attr.rq_depth = PERFTEST_RQ_DEPTH;
+ ctx->reg_mr = reg_phys_mr(pd, phys_mem[qp_num - 1],
+ PERFTEST_DEFAULT_MEM_SIZE,
+ MR_ACCESS_RDWR, NULL);
+ if (qp_num > 1)
+ perftest_io_mr[qp_num - 2] = ctx->reg_mr;
+
+ xrnic_rdma_create_qp(cm_id, ctx->reg_mr->pd,
+ &qp_attr);
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = 16;
+ conn_param.responder_resources = 16;
+ xrnic_rdma_accept(cm_id, &conn_param);
+ break;
+ case XRNIC_ESTABLISHD:
+ if (cm_id->child_qp_num > 1) {
+ perftest_fill_wr((char *)trinfo.sq_ba +
+ ((qp_num - 1) *
+ sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH));
+ xrnic_hw_hs_reset_sq_cq(&cm_id->qp_info, NULL);
+ }
+ break;
+ case XRNIC_DREQ_RCVD:
+ xrnic_destroy_qp(&cm_id->qp_info);
+ xrnic_rdma_disconnect(cm_id);
+ xrnic_rdma_destroy_id(cm_id, 0);
+ dereg_mr(ctx->reg_mr);
+ io_mr_idx = 0;
+ prev_qpn = 0;
+ rq_ci_db = 0;
+ sq_cmpl_db = 0;
+ break;
+ default:
+ pr_info("Unhandled CM Event: %d\n",
+ conn_event->cm_event);
+ }
+ return 0;
+}
+
+/**
+ * perftest_init() - Perf test init function.
+ * @return: 0 on success or error code on failure.
+ */
+static int __init perftest_init(void)
+{
+ int ret, i;
+ struct sockaddr_storage s_addr;
+ struct sockaddr_in *sin_addr;
+ struct sockaddr_in6 *sin6_addr;
+
+ if (strcmp(server_ip, "0.0.0.0") == 0) {
+ pr_err("server ip module parameter not provided\n");
+ return -EINVAL;
+ }
+
+ /* If port number is not set, then it should point to the default */
+ if (port == -1) {
+ port = PERFTEST_PORT;
+ pr_info("Using app default port number: %d\n", port);
+ } else if (port < 0) {
+ /* Any other negative value.
+ * Some ports are reserved and a few others may be in use;
+ * a check could be added here to validate that the given
+ * port number is free to use.
+ */
+ pr_err("port number should not be a negative value\n");
+ return -EINVAL;
+ }
+ pr_info("Using port number %d\n", port);
+
+ cm_id = xrnic_rdma_create_id(perftest_cm_handler, NULL, XRNIC_PS_TCP,
+ XRNIC_QPT_UC, PERFTEST_MAX_QPS);
+ if (!cm_id)
+ goto err;
+
+ if (perftest_parse_addr(&s_addr, server_ip))
+ goto err;
+
+ if (s_addr.ss_family == AF_INET) {
+ sin_addr = (struct sockaddr_in *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin_addr->sin_addr.s_addr,
+ port, AF_INET);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv4\n");
+ goto err;
+ }
+ }
+ if (s_addr.ss_family == AF_INET6) {
+ sin6_addr = (struct sockaddr_in6 *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ port, AF_INET6);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv6\n");
+ goto err;
+ }
+ }
+
+ if (xrnic_rdma_listen(cm_id, 1) != XRNIC_SUCCESS)
+ goto err;
+
+ trinfo.rq_buf_ba_phys = alloc_mem(NULL, PERF_TEST_RQ_BUF_SIZ);
+ if (-ENOMEM == trinfo.rq_buf_ba_phys)
+ goto err;
+ trinfo.rq_buf_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr
+ (trinfo.rq_buf_ba_phys);
+
+ trinfo.send_sgl_phys = alloc_mem(NULL, 0x400000);
+ if (-ENOMEM == trinfo.send_sgl_phys)
+ goto err;
+ trinfo.send_sgl =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.send_sgl_phys);
+
+ trinfo.sq_ba_phys = alloc_mem(NULL, 0x100000);
+ if (-ENOMEM == trinfo.sq_ba_phys)
+ goto err;
+ trinfo.sq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.sq_ba_phys);
+
+ trinfo.cq_ba_phys = alloc_mem(NULL, 0x40000);
+ if (-ENOMEM == trinfo.cq_ba_phys)
+ goto err;
+ trinfo.cq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.cq_ba_phys);
+ trinfo.rq_wptr_db_phys = alloc_mem(NULL, 8);
+ trinfo.sq_cmpl_db_phys = alloc_mem(NULL, 8);
+ pd = alloc_pd();
+ for (i = 0; i < PERFTEST_MAX_QPS; i++) {
+ phys_mem[i] = alloc_mem(pd, PERFTEST_DEFAULT_MEM_SIZE);
+ if (IS_ERR_VALUE(phys_mem[i])) {
+ pr_err("PERFTEST[%d:%s] Mem registration failed: %lld\n",
+ __LINE__, __func__, phys_mem[i]);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+/* free_mem() works on only valid physical address returned from alloc_mem(),
+ * and ignores if NULL or invalid address is passed.
+ * So, even if any of the above allocations fail in the middle,
+ * we can safely call free_mem() on all addresses.
+ *
+ * we are using carve-out memory for the requirements of ERNIC.
+ * so, we cannot use devm_kzalloc() as kernel cannot see these
+ * memories until ioremapped.
+ */
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+
+ return -EINVAL;
+}
+
+/**
+ * perftest_exit() - perftest module exit function.
+ */
+static void __exit perftest_exit(void)
+{
+ int i;
+
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+}
+
+/* This driver is an example driver, which uses the APIs exported by the
+ * ernic driver, to demonstrate RDMA communication between peers
+ * on the InfiniBand network. The remote peer can be any RDMA enabled NIC.
+ * There is no real device for this driver and so, a compatibility string and
+ * probe function are not needed for this driver.
+ */
+module_init(perftest_init);
+module_exit(perftest_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Perftest Example driver");
+MODULE_AUTHOR("SDHANVAD");
diff --git a/drivers/staging/xlnx_ernic/xhw_config.h b/drivers/staging/xlnx_ernic/xhw_config.h
new file mode 100644
index 000000000000..7846abd18bec
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_config.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_CONFIG_H
+#define _XRNIC_HW_CONFIG_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+
+#define XRNIC_HW_MAX_QP_ENABLE 30
+#define XRNIC_HW_MAX_QP_SUPPORT 28
+#define XRNIC_HW_FLOW_CONTROL_VALUE 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_CONFIG_H*/
diff --git a/drivers/staging/xlnx_ernic/xhw_def.h b/drivers/staging/xlnx_ernic/xhw_def.h
new file mode 100644
index 000000000000..c59f266c03f6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_def.h
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_DEF_H
+#define _XRNIC_HW_DEF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include "xhw_config.h"
+
+#define XRNIC_MAX_QP_ENABLE XRNIC_HW_MAX_QP_ENABLE
+#define XRNIC_MAX_QP_SUPPORT XRNIC_HW_MAX_QP_SUPPORT
+#define XRNIC_MAX_PORT_SUPPORT 0xFFFE
+#define XRNIC_REG_WIDTH 32
+#define XRNIC_QPS_ENABLED XRNIC_MAX_QP_ENABLE
+#define XRNIC_QP1_SEND_PKT_SIZE 512
+#define XRNIC_FLOW_CONTROL_VALUE XRNIC_HW_FLOW_CONTROL_VALUE
+#define XRNIC_CONFIG_XRNIC_EN 0x1
+#define XRNIC_UDP_SRC_PORT 0x12B7
+#define XRNIC_CONFIG_IP_VERSION (0x1 << 1)
+#define XRNIC_CONFIG_DEPKT_BYPASS_EN (0x1 << 2)
+#define XRNIC_CONFIG_ERR_BUF_EN (0x1 << 5)
+#define XRNIC_CONFIG_FLOW_CONTROL_EN (XRNIC_FLOW_CONTROL_VALUE << 6)
+#define XRNIC_CONFIG_NUM_QPS_ENABLED (XRNIC_QPS_ENABLED << 8)
+#define XRNIC_CONFIG_UDP_SRC_PORT (XRNIC_UDP_SRC_PORT << 16)
+
+#define XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED 1
+
+/* Clear the interrupt by writing that bit to the interrupt status register. */
+#define RDMA_READ 4
+#define RDMA_SEND 2
+#define RDMA_WRITE 0
+
+#define XRNIC_QP_TIMEOUT_RETRY_CNT 0x3 /*0x3*/
+#define XRNIC_QP_TIMEOUT_RNR_NAK_TVAL 0x1F /*MAX*/
+#define XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT 0x1F /*MAX 0x1f*/
+#define XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 8)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 11)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL \
+ (XRNIC_QP_TIMEOUT_RNR_NAK_TVAL << 16)
+
+#define XRNIC_QP_PMTU 0x4
+#define XRNIC_QP_MAX_RD_OS 0xFF
+#define XRNIC_QP_RQ_BUFF_SZ 0x2
+#define XRNIC_QP1_RQ_BUFF_SZ 0x02
+#define XRNIC_QP_CONFIG_QP_ENABLE 0x1
+#define XRNIC_QP_CONFIG_ACK_COALSE_EN BIT(1)
+#define XRNIC_QP_CONFIG_RQ_INTR_EN BIT(2)
+#define XRNIC_QP_CONFIG_CQE_INTR_EN BIT(3)
+#define XRNIC_QP_CONFIG_HW_HNDSHK_DIS BIT(4)
+#define XRNIC_QP_CONFIG_CQE_WRITE_EN BIT(5)
+#define XRNIC_QP_CONFIG_UNDER_RECOVERY BIT(6)
+#define XRNIC_QP_CONFIG_IPV6_EN BIT(7)
+#define XRNIC_QP_CONFIG_PMTU (0x4 << 8)
+#define XRNIC_QP_CONFIG_PMTU_256 (0x0 << 8)
+#define XRNIC_QP_CONFIG_PMTU_512 (0x1 << 8)
+#define XRNIC_QP_CONFIG_PMTU_1024 (0x2 << 8)
+#define XRNIC_QP_CONFIG_PMTU_2048 (0x3 << 8)
+#define XRNIC_QP_CONFIG_PMTU_4096 (0x4 << 8)
+#define XRNIC_QP_RQ_BUF_SIZ_DIV (256)
+#define XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS (16)
+#define XRNIC_QP_CONFIG_RQ_BUFF_SZ(x) (((x) / XRNIC_QP_RQ_BUF_SIZ_DIV)\
+ << XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS)
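+/* Example: a 4096B RQ buffer encodes as 4096 / 256 = 16 in bits [31:16]. */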
+#define XRNIC_QP1_CONFIG_RQ_BUFF_SZ (XRNIC_QP1_RQ_BUFF_SZ << 16)
+
+#define XRNIC_QP_PARTITION_KEY 0xFFFF
+#define XRNIC_QP_TIME_TO_LIVE 0x40
+
+#define XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS 0x3F
+#define XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE (XRNIC_QP_TIME_TO_LIVE << 8)
+#define XRNIC_QP_ADV_CONFIG_PARTITION_KEY (XRNIC_QP_PARTITION_KEY << 16)
+
+#define XRNIC_REJ_RESEND_COUNT 3
+#define XRNIC_REP_RESEND_COUNT 3
+#define XRNIC_DREQ_RESEND_COUNT 3
+
+#define XNVEMEOF_RNIC_IF_RHOST_BASE_ADDRESS 0x8c000000
+#define XRNIC_CONFIG_ENABLE 1
+#define XRNIC_RESERVED_SPACE 0x4000
+#define XRNIC_NUM_OF_TX_HDR 128
+#define XRNIC_SIZE_OF_TX_HDR 128
+#define XRNIC_NUM_OF_TX_SGL 256
+#define XRNIC_SIZE_OF_TX_SGL 64
+#define XRNIC_NUM_OF_BYPASS_BUF 32
+#define XRNIC_SIZE_OF_BYPASS_BUF 512
+#define XRNIC_NUM_OF_ERROR_BUF 64
+#define XRNIC_SIZE_OF_ERROR_BUF 256
+#define XRNIC_OUT_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_OUT_ERRST_Q_WRPTR 0x0
+#define XRNIC_IN_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_IN_ERRST_Q_WRPTR 0x0
+#define XRNIC_NUM_OF_DATA_BUF 4096
+#define XRNIC_SIZE_OF_DATA_BUF 4096
+#define XRNIC_NUM_OF_RESP_ERR_BUF 64
+#define XRNIC_SIZE_OF_RESP_ERR_BUF 256
+#define XRNIC_MAD_HEADER 24
+#define XRNIC_MAD_DATA 232
+#define XRNIC_RECV_PKT_SIZE 512
+#define XRNIC_SEND_PKT_SIZE 64
+#define XRNIC_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_PKT_SIZE 4096
+#define XRNIC_MAX_RECV_PKT_SIZE 4096
+#define XRNIC_MAX_SQ_DEPTH 256
+#define XRNIC_MAX_RQ_DEPTH 256
+#define XRNIC_SQ_DEPTH 128
+#define XRNIC_RQ_DEPTH 64
+#define XRNIC_RQ_WRPTR_DBL 0xBC004000
+#define XRNIC_BYPASS_BUF_WRPTR 0xBC00C000
+#define XRNIC_ERROR_BUF_WRPTR 0xBC010000
+
+#define PKT_VALID_ERR_INTR_EN 0x1
+#define MAD_PKT_RCVD_INTR_EN (0x1 << 1)
+#define BYPASS_PKT_RCVD_INTR_EN (0x1 << 2)
+#define RNR_NACK_GEN_INTR_EN (0x1 << 3)
+#define WQE_COMPLETED_INTR_EN (0x1 << 4)
+#define ILL_OPC_SENDQ_INTR_EN (0x1 << 5)
+#define QP_PKT_RCVD_INTR_EN (0x1 << 6)
+#define FATAL_ERR_INTR_EN (0x1 << 7)
+#define ERNIC_MEM_REGISTER
+
+#define XRNIC_INTR_ENABLE_DEFAULT 0x000000FF
+#define XRNIC_VALID_INTR_ENABLE 0
+
+/* XRNIC Controller global configuration registers */
+
+struct xrnic_conf {
+ __u32 xrnic_en:1;
+ __u32 ip_version:1; //IPv6 or IPv4
+ __u32 depkt_bypass_en:1;
+ __u32 reserved:5;
+ __u32 num_qps_enabled:8;
+ __u32 udp_src_port:16;
+} __packed;
+
+struct tx_hdr_buf_sz {
+ __u32 num_hdrs:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct tx_sgl_buf_sz {
+ __u32 num_sgls:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct bypass_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct err_pkt_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct timeout_conf {
+ __u32 timeout:5;
+ __u32 reserved:3;
+ __u32 retry_cnt:3;
+ __u32 retry_cnt_rnr:3;
+ __u32 reserved1:2;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved2:11;
+
+} __packed;
+
+struct out_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct in_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct inc_sr_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rresp_cnt:16;
+} __packed;
+
+struct inc_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct out_io_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rw_cnt:16;
+} __packed;
+
+struct out_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct last_in_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+struct last_out_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+/*Interrupt register definition.*/
+struct intr_en {
+ __u32 pkt_valdn_err_intr_en:1;
+ __u32 mad_pkt_rcvd_intr_en:1;
+ __u32 bypass_pkt_rcvd_intr_en:1;
+ __u32 rnr_nack_gen_intr_en:1;
+ __u32 wqe_completed_i:1;
+ __u32 ill_opc_in_sq_intr_en:1;
+ __u32 qp_pkt_rcvd_intr_en:1;
+ __u32 fatal_err_intr_en:1;
+ __u32 reverved:24;
+} __packed;
+
+struct data_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+struct resp_err_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+/*Global register configuration*/
+struct xrnic_ctrl_config {
+ struct xrnic_conf xrnic_conf;
+ __u32 xrnic_adv_conf;
+ __u32 reserved1[2];
+ __u32 mac_xrnic_src_addr_lsb;
+ __u32 mac_xrnic_src_addr_msb;
+ __u32 reserved2[2];
+ __u32 ip_xrnic_addr1; //0x0020
+ __u32 ip_xrnic_addr2; //0x0024
+ __u32 ip_xrnic_addr3; //0x0028
+ __u32 ip_xrnic_addr4; //0x002C
+ __u32 tx_hdr_buf_ba; //0x0030
+ __u32 reserved_0x34; //0x0034
+ struct tx_hdr_buf_sz tx_hdr_buf_sz; //0x0038
+ __u32 reserved_0x3c;
+
+ __u32 tx_sgl_buf_ba; //0x0040
+ __u32 reserved_0x44; //0x0044
+ struct tx_sgl_buf_sz tx_sgl_buf_sz; //0x0048
+ __u32 reserved_0x4c;
+
+ __u32 bypass_buf_ba; //0x0050
+ __u32 reserved_0x54; //0x0054
+ struct bypass_buf_sz bypass_buf_sz; //0x0058
+ __u32 bypass_buf_wrptr; //0x005C
+ __u32 err_pkt_buf_ba; //0x0060
+ __u32 reserved_0x64; //0x0064
+ struct err_pkt_buf_sz err_pkt_buf_sz; //0x0068
+ __u32 err_buf_wrptr; //0x006C
+ __u32 ipv4_address; //0x0070
+ __u32 reserved_0x74;
+
+ __u32 out_errsts_q_ba; //0x0078
+ __u32 reserved_0x7c;
+ struct out_errsts_q_sz out_errsts_q_sz; //0x0080
+ __u32 out_errsts_q_wrptr; //0x0084
+
+ __u32 in_errsts_q_ba; //0x0088
+ __u32 reserved_0x8c;
+ struct in_errsts_q_sz in_errsts_q_sz; //0x0090
+ __u32 in_errsts_q_wrptr; //0x0094
+
+ __u32 reserved_0x98; //0x0098
+ __u32 reserved_0x9c; //0x009C
+
+ __u32 data_buf_ba; //0x00A0
+ __u32 reserved_0xa4; //0x00A4
+ struct data_buf_sz data_buf_sz; //0x00A8
+
+ __u32 cnct_io_conf; //0x00AC
+
+ __u32 resp_err_pkt_buf_ba; //0x00B0
+ __u32 reserved_0xb4; //0x00B4
+ struct resp_err_buf_sz resp_err_buf_sz; //0x00B8
+
+	__u32 reserved3[17]; //0x00BC-0x00FF
+
+ struct inc_sr_pkt_cnt inc_sr_pkt_cnt;//0x0100
+ struct inc_am_pkt_cnt inc_am_pkt_cnt;//0x0104
+ struct out_io_pkt_cnt out_io_pkt_cnt;//0x108
+ struct out_am_pkt_cnt out_am_pkt_cnt;//0x010c
+ struct last_in_pkt last_in_pkt; //0x0110
+ struct last_out_pkt last_out_pkt; //0x0114
+
+ __u32 inv_dup_pkt_cnt; //0x0118 incoming invalid duplicate
+
+ __u32 rnr_in_pkt_sts; //0x011C
+ __u32 rnr_out_pkt_sts; //0x0120
+
+ __u32 wqe_proc_sts; //0x0124
+
+ __u32 pkt_hdr_vld_sts; //0x0128
+ __u32 qp_mgr_sts; //0x012C
+
+ __u32 incoming_all_drop_count; //0x130
+ __u32 incoming_nack_pkt_count; //0x134
+ __u32 outgoing_nack_pkt_count; //0x138
+ __u32 resp_handler_status; //0x13C
+
+ __u32 reserved4[16];
+
+ struct intr_en intr_en; //0x0180
+ __u32 intr_sts; //0x0184
+ __u32 reserved5[2];
+ __u32 rq_intr_sts_1; //0x0190
+ __u32 rq_intr_sts_2; //0x0194
+ __u32 rq_intr_sts_3; //0x0198
+ __u32 rq_intr_sts_4; //0x019C
+ __u32 rq_intr_sts_5; //0x01A0
+ __u32 rq_intr_sts_6; //0x01A4
+ __u32 rq_intr_sts_7; //0x01A8
+ __u32 rq_intr_sts_8; //0x01AC
+
+ __u32 cq_intr_sts_1; //0x01B0
+ __u32 cq_intr_sts_2; //0x01B4
+ __u32 cq_intr_sts_3; //0x01B8
+ __u32 cq_intr_sts_4; //0x01BC
+	__u32 cq_intr_sts_5; //0x01C0
+	__u32 cq_intr_sts_6; //0x01C4
+	__u32 cq_intr_sts_7; //0x01C8
+	__u32 cq_intr_sts_8; //0x01CC
+
+ __u32 reserved6[12];
+};
+
+struct qp_conf {
+ __u32 qp_enable:1;
+ __u32 ack_coalsc_en:1;
+ __u32 rq_intr_en:1;
+ __u32 cq_intr_en:1;
+ __u32 hw_hndshk_dis:1;
+ __u32 cqe_write_en:1;
+ __u32 qp_under_recovery:1;
+ __u32 ip_version:1;
+ __u32 pmtu :3;
+ __u32 reserved2:5;
+ __u32 rq_buf_sz:16; //RQ buffer size (in multiples of 256B)
+} __packed;
+
+struct qp_adv_conf {
+ __u32 traffic_class:6;
+ __u32 reserved1 :2;
+ __u32 time_to_live:8;
+ __u32 partition_key:16;
+} __packed;
+
+struct time_out {
+ __u32 timeout:5;
+ __u32 reserved1:3;
+ __u32 retry_cnt:3;
+ __u32 reserved2:5;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved3:3;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:2;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved:1;
+} __packed;
+
+struct qp_status {
+ __u32 qp_fatal:1;
+ __u32 rq_ovfl:1;
+ __u32 sq_full:1;
+ __u32 osq_full:1;
+ __u32 cq_full:1;
+ __u32 reserved1:4;
+ __u32 sq_empty:1;
+ __u32 osq_empty:1;
+ __u32 qp_retried:1;
+ __u32 reserved2:4;
+ __u32 nak_syndr_rcvd:7;
+ __u32 reserved3:1;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:1;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved5:1;
+} __packed;
+
+//This structure is applicable to RDMA queue pairs other than QP1.
+struct rq_buf_ba_ca {
+ __u32 reserved:8; //0x308
+ __u32 rq_buf_ba:24;
+} __packed;
+
+struct sq_ba {
+ __u32 reserved1:5; //0x310
+ __u32 sq_ba:27;
+} __packed;
+
+struct cq_ba {
+ __u32 reserved2:5; //0x318
+ __u32 cq_ba:27;
+} __packed;
+
+struct cq_head {
+ __u32 cq_head:16; //0x330
+ __u32 reserved5:16;
+} __packed;
+
+struct rq_ci_db {
+ __u32 rq_ci_db:16; //0x334
+ __u32 reserved6:16;
+} __packed;
+
+struct sq_pi_db {
+ __u32 sq_pi_db:16; //0x338
+ __u32 reserved7:16;
+} __packed;
+
+struct q_depth {
+ __u32 sq_depth:16; //0x33c
+ __u32 cq_depth:16;
+} __packed;
+
+struct sq_psn {
+ __u32 sq_psn:24; //0x340
+ __u32 reserved8:8;
+} __packed;
+
+struct last_rq_req {
+ __u32 rq_psn:24; //0x344
+ __u32 rq_opcode:8;
+} __packed;
+
+struct dest_qp_conf {
+ __u32 dest_qpid:24; //0x348
+ __u32 reserved9:8;
+} __packed;
+
+struct stat_ssn {
+ __u32 exp_ssn:24; //0x380
+ __u32 reserved10:8;
+} __packed;
+
+struct stat_msn {
+ __u32 curr_msn:24; //0x384
+ __u32 reserved11:8;
+} __packed;
+
+struct stat_curr_sqptr_pro {
+ __u32 curr_sqptr_proc:16;
+ __u32 reserved12:16;
+} __packed;
+
+struct stat_resp_psn {
+ __u32 exp_resp_psn:24;
+ __u32 reserved:8;
+} __packed;
+
+struct stat_rq_buf_ca {
+ __u32 reserved:8;
+ __u32 rq_buf_ca:24;
+} __packed;
+
+/* QP1 is a special QP used for all management packets, as per the RoCEv2 spec */
+struct rdma_qp1_attr {
+ struct qp_conf qp_conf; //0x200
+ struct qp_adv_conf qp_adv_conf; //0x204
+ struct rq_buf_ba_ca rq_buf_ba_ca; //0x208
+ __u32 reserved_0x20c; //0x20c
+ struct sq_ba sq_ba; //0x210
+ __u32 reserved_0x214; //0x214
+ struct cq_ba cq_ba; //0x218
+	__u32 reserved_0x21c; //0x21c
+ __u32 rq_wrptr_db_add; //0x220
+ __u32 reserved_0x224; //0x224
+ __u32 sq_cmpl_db_add; //0x228
+ __u32 reserved_0x22c; //0x22c
+ struct cq_head cq_head; //0x230
+ struct rq_ci_db rq_ci_db; //0x234
+ struct sq_pi_db sq_pi_db; //0x238
+ struct q_depth q_depth; //0x23c
+ __u32 reserved1[2]; //0x240
+ struct dest_qp_conf dest_qp_conf; //0x248
+ struct timeout_conf timeout_conf; //0x24C
+ __u32 mac_dest_addr_lsb; //0x250
+ __u32 mac_dest_addr_msb; //0x254
+ __u32 reserved2[2];
+ __u32 ip_dest_addr1; //0x260
+ __u32 ip_dest_addr2; //0x264
+ __u32 ip_dest_addr3; //0x268
+ __u32 ip_dest_addr4; //0x26C
+ __u32 reserved3[6]; //0x270-287(inclusive)
+ struct qp_status qp_status; //0x288
+	__u32 reserved4[2]; //0x28c-0x293(inclusive)
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x294
+	__u32 reserved5[26]; //0x298-0x2FF(inclusive)
+};
+
+/* General RDMA QP attribute*/
+struct rdma_qp_attr {
+ struct qp_conf qp_conf; //0x300
+ struct qp_adv_conf qp_adv_conf; //0x304
+ struct rq_buf_ba_ca rq_buf_ba_ca;//0x308
+ __u32 reserved_0x30c; //0x30c
+ struct sq_ba sq_ba; //0x310
+	__u32 reserved_0x314; //0x314
+ struct cq_ba cq_ba; //0x318
+ __u32 reserved_0x31c; //0x31c
+ __u32 rq_wrptr_db_add; //0x320
+ __u32 reserved_0x324; //0x324
+ __u32 sq_cmpl_db_add; //0x328
+	__u32 reserved_0x32c; //0x32c
+ struct cq_head cq_head; //0x330
+ struct rq_ci_db rq_ci_db;//0x334
+ struct sq_pi_db sq_pi_db; //0x338
+ struct q_depth q_depth;//0x33c
+ struct sq_psn sq_psn; //0x340
+ struct last_rq_req last_rq_req;//0x344
+ struct dest_qp_conf dest_qp_conf; //0x348
+ struct timeout_conf timeout_conf; //0x34C
+ __u32 mac_dest_addr_lsb; //0x350
+ __u32 mac_dest_addr_msb; //0x354
+ __u32 reserved1[2]; //0x358
+ __u32 ip_dest_addr1; //0x360
+ __u32 ip_dest_addr2; //0x364
+ __u32 ip_dest_addr3; //0x368
+ __u32 ip_dest_addr4; //0x36C
+ __u32 reserved2[4];
+ struct stat_ssn stat_ssn;//0x380
+ struct stat_msn stat_msn;//0x384
+ struct qp_status qp_status; //0x388
+ struct stat_curr_sqptr_pro stat_curr_sqptr_pro;//0x38C
+ struct stat_resp_psn stat_resp_psn; //0x0390
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x0394
+ __u32 stat_wqe; //0x398
+ __u32 stat_rq_pi_db; //0x39C
+#ifdef ERNIC_MEM_REGISTER
+ __u32 reserved3[4];
+ __u32 pd;
+ __u32 reserved[19];
+#else
+ __u32 reserved3[24];
+#endif
+};
+
+union ctx { // 2 Byte
+ __u16 context;
+ __u16 wr_id;
+} __packed;
+
+//Work request, 64 bytes in size
+struct wr {
+ union ctx ctx; // 2 Byte
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+union ctxe {
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+//Completion Queue Entry, 4 bytes as defined here
+struct cqe {
+ union ctxe ctxe; // 2 Byte
+ __u8 opcode;
+ __u8 err_flag;
+} __packed;
+
+struct xrnic_reg_map {
+ struct xrnic_ctrl_config xrnic_ctrl_config;
+ struct rdma_qp1_attr rdma_qp1_attr;
+ struct rdma_qp_attr rdma_qp_attr[255];
+
+};
+
+struct xrnic_memory_map {
+ struct xrnic_reg_map *xrnic_regs;
+ u64 xrnic_regs_phys;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ struct wr *sq_ba;
+ u64 sq_ba_phys;
+ void *tx_hdr_buf_ba;
+ u64 tx_hdr_buf_ba_phys;
+ void *tx_sgl_buf_ba;
+ u64 tx_sgl_buf_ba_phys;
+ void *bypass_buf_ba;
+ u64 bypass_buf_ba_phys;
+ void *err_pkt_buf_ba;
+ u64 err_pkt_buf_ba_phys;
+ void *out_errsts_q_ba;
+ u64 out_errsts_q_ba_phys;
+ void *in_errsts_q_ba;
+ u64 in_errsts_q_ba_phys;
+ void *rq_wrptr_db_add;
+ u64 rq_wrptr_db_add_phys;
+ void *sq_cmpl_db_add;
+ u64 sq_cmpl_db_add_phys;
+ void *stat_rq_buf_ca;
+ u64 stat_rq_buf_ca_phys;
+ void *data_buf_ba;
+ u64 data_buf_ba_phys;
+ u64 resp_err_pkt_buf_ba_phys;
+ void *resp_err_pkt_buf_ba;
+ u32 intr_en;
+ u32 cq_intr[8];
+ u32 rq_intr[8];
+ u64 xrnicif_phys;
+};
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_DEF_H*/
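+
+/*
+ * Note on the register structs above: each __packed bitfield struct models
+ * one 32-bit hardware register. The field layout is only meaningful if the
+ * compiler allocates bitfields LSB-first, as GCC does on the little-endian
+ * ARM targets this IP is deployed on. A minimal, illustrative sketch of
+ * turning a struct view into a register write, assuming a mapped register
+ * block pointer named "regs" (the name is an assumption for illustration):
+ *
+ *	struct xrnic_conf conf = { 0 };
+ *	u32 regval;
+ *
+ *	conf.xrnic_en = 1;
+ *	conf.udp_src_port = 4791;
+ *	memcpy(&regval, &conf, sizeof(regval));
+ *	iowrite32(regval, (void __iomem *)&regs->xrnic_ctrl_config.xrnic_conf);
+ *
+ * 4791 is the IANA-assigned RoCEv2 UDP port. A guard such as
+ * BUILD_BUG_ON(sizeof(struct xrnic_conf) != 4) would catch layout drift.
+ */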
diff --git a/drivers/staging/xlnx_ernic/xif.h b/drivers/staging/xlnx_ernic/xif.h
new file mode 100644
index 000000000000..fb5f02d8c08c
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xif.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_IF_H
+#define _XRNIC_IF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+
+#define XRNIC_MAX_CHILD_CM_ID 255
+#define XRNIC_CM_PRVATE_DATA_LENGTH 32
+
+enum xrnic_wc_event {
+ XRNIC_WC_RDMA_WRITE = 0x0,
+ XRNIC_WC_SEND = 0x2,
+ XRNIC_WC_RDMA_READ = 0x4,
+};
+
+union xrnic_ctxe { // 2 Byte
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+struct xrnic_cqe {
+ union xrnic_ctxe ctxe; // 2 Byte
+ __u8 opcode; // 1 Byte
+ __u8 err_flag; // 1 Byte
+} __packed;
+
+enum xrnic_port_space {
+ XRNIC_PS_SDP = 0x0001,
+ XRNIC_PS_IPOIB = 0x0002,
+ XRNIC_PS_IB = 0x013F,
+ XRNIC_PS_TCP = 0x0106,
+ XRNIC_PS_UDP = 0x0111,
+};
+
+enum xrnic_cm_error {
+ XRNIC_INVALID_CM_ID = 2,
+ XRNIC_INVALID_CM_OUTSTANDING = 3,
+ XRNIC_INVALID_QP_ID = 4,
+ XRNIC_INVALID_QP_INIT_ATTR = 5,
+ XRNIC_INVALID_NUM_CHILD = 6,
+ XRNIC_INVALID_CHILD_ID = 7,
+ XRNIC_INVALID_CHILD_NUM = 8,
+ XRNIC_INVALID_QP_TYPE = 9,
+ XRNIC_INVALID_PORT = 10,
+ XRNIC_INVALID_ADDR = 11,
+ XRNIC_INVALID_PKT_CNT = 12,
+ XRNIC_INVALID_ADDR_TYPE = 13,
+ XRNIC_INVALID_QP_CONN_PARAM = 14,
+ XRNIC_INVALID_QP_STATUS = 15,
+};
+
+enum xrnic_qp_type {
+ XRNIC_QPT_RC,
+ XRNIC_QPT_UC,
+ XRNIC_QPT_UD,
+};
+
+enum xrnic_rdma_cm_event_type {
+ XRNIC_LISTEN = 1,
+ XRNIC_REQ_RCVD,
+ XRNIC_MRA_SENT,
+ XRNIC_REJ_SENT,
+ XRNIC_REJ_RECV,
+ XRNIC_REP_SENT,
+ XRNIC_MRA_RCVD,
+ XRNIC_ESTABLISHD,
+ XRNIC_DREQ_RCVD,
+ XRNIC_DREQ_SENT,
+ XRNIC_RTU_TIMEOUT,
+ XRNIC_TIMEWAIT,
+ XRNIC_DREP_TIMEOUT,
+ XRNIC_REP_RCVD,
+ XRNIC_CM_EVENT_ADDR_ERROR,
+ XRNIC_CM_EVENT_ADDR_RESOLVED,
+ XRNIC_CM_EVENT_ROUTE_RESOLVED,
+};
+
+struct xrnic_hw_handshake_info {
+ u32 rq_wrptr_db_add;
+ u32 sq_cmpl_db_add;
+ u32 cnct_io_conf_l_16b;
+};
+
+struct xrnic_qp_info {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 starting_psn;
+ struct ernic_pd *pd;
+};
+
+struct xrnic_qp_init_attr {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ enum xrnic_qp_type qp_type;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+};
+
+struct xrnic_rdma_route {
+ u8 src_addr[16];
+ u8 dst_addr[16];
+ u16 ip_addr_type;
+ u8 smac[ETH_ALEN];
+ u8 dmac[ETH_ALEN];
+ struct sockaddr_storage s_addr;
+ struct sockaddr_storage d_addr;
+};
+
+enum xrnic_port_qp_status {
+ XRNIC_PORT_QP_FREE,
+ XRNIC_PORT_QP_IN_USE,
+};
+
+struct xrnic_rdma_cm_event_info {
+ enum xrnic_rdma_cm_event_type cm_event;
+ int status;
+ void *private_data;
+ u32 private_data_len;
+};
+
+struct xrnic_rdma_conn_param {
+ u8 private_data[XRNIC_CM_PRVATE_DATA_LENGTH];
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u32 qp_num;
+ u32 srq;
+};
+
+enum xrnic_cm_state {
+ XRNIC_CM_REQ_SENT = 0,
+ XRNIC_CM_REP_RCVD,
+ XRNIC_CM_ESTABLISHED,
+};
+
+struct xrnic_rdma_cm_id {
+ int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *event);
+ void *cm_context;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ struct xrnic_qp_info qp_info;
+ struct xrnic_rdma_route route;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ enum xrnic_port_space ps;
+ enum xrnic_qp_type qp_type;
+ u16 port_num;
+ u16 child_qp_num;
+ struct xrnic_rdma_conn_param conn_param;
+ enum xrnic_port_qp_status qp_status;
+ int cm_state;
+ struct list_head list;
+};
+
+struct xrnic_rdma_cm_id_info {
+ struct xrnic_rdma_cm_id parent_cm_id;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ u32 num_child;
+ struct xrnic_rdma_cm_event_info conn_event_info;
+};
+
+void xrnic_rq_event_handler(u32 rq_count, void *user_arg);
+void xrnic_sq_event_handler(u32 cq_head, void *user_arg);
+int xrnic_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info);
+
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type,
+ int num_child_qp);
+
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type);
+
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int outstanding);
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr);
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count);
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count);
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info);
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id);
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag);
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info);
+
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout);
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_IF_H*/
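+
+/*
+ * Illustrative server-side usage of the CM API above (a sketch only:
+ * error handling is trimmed, and the NULL protection domain and handler
+ * body are assumptions based on the declarations, not a verified flow):
+ *
+ *	static int my_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ *				 struct xrnic_rdma_cm_event_info *ev)
+ *	{
+ *		if (ev->cm_event == XRNIC_REQ_RCVD) {
+ *			struct xrnic_qp_init_attr attr = { 0 };
+ *			struct xrnic_rdma_conn_param param = { 0 };
+ *
+ *			attr.qp_type = XRNIC_QPT_RC;
+ *			xrnic_rdma_create_qp(cm_id, NULL, &attr);
+ *			xrnic_rdma_accept(cm_id, &param);
+ *		}
+ *		return 0;
+ *	}
+ *
+ *	cm_id = xrnic_rdma_create_id(my_cm_handler, NULL, XRNIC_PS_UDP,
+ *				     XRNIC_QPT_RC, 1);
+ *	xrnic_rdma_bind_addr(cm_id, (u8 *)&local_ipv4, 1, AF_INET);
+ *	xrnic_rdma_listen(cm_id, 1);
+ */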
diff --git a/drivers/staging/xlnx_ernic/xioctl.h b/drivers/staging/xlnx_ernic/xioctl.h
new file mode 100644
index 000000000000..8c9738e69383
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xioctl.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _XRNIC_IOCTL_H_
+#define _XRNIC_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include "xlog.h"
+
+#define XRNIC_MAGIC 'L'
+
+#define XRNIC_DISPLAY_MMAP_ALL _IOW(XRNIC_MAGIC, 1, uint)
+#define XRNIC_DISPLAY_MMAP_CONFIG _IOW(XRNIC_MAGIC, 2, uint)
+#define XRNIC_DISPLAY_MMAP_QP1 _IOW(XRNIC_MAGIC, 3, uint)
+#define XRNIC_DISPLAY_MMAP_QPX _IOW(XRNIC_MAGIC, 4, uint)
+#define XRNIC_DISPLAY_PKT _IOW(XRNIC_MAGIC, 5, uint)
+
+#define XRNIC_MAX_CMDS 5
+
+#endif /* _XRNIC_IOCTL_H_ */
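+
+/*
+ * These commands are intended to be issued from userspace on the xrnic
+ * character device. An illustrative (untested) invocation, assuming the
+ * /dev/xrnic0 node created by the driver and <fcntl.h>/<sys/ioctl.h>:
+ *
+ *	int fd = open("/dev/xrnic0", O_RDWR);
+ *	unsigned int qp_num = 2;
+ *
+ *	if (fd >= 0)
+ *		ioctl(fd, XRNIC_DISPLAY_MMAP_QPX, &qp_num);
+ *
+ * Note that the driver currently registers dummy file_operations, so these
+ * commands are placeholders until the ioctl handler is implemented.
+ */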
diff --git a/drivers/staging/xlnx_ernic/xmain.c b/drivers/staging/xlnx_ernic/xmain.c
new file mode 100644
index 000000000000..67d525b51716
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.c
@@ -0,0 +1,1592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author : Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+ * : Anjaneyulu Reddy Mule <anjaneyulu.reddy.mule@xilinx.com>
+ * : Srija Malyala <srija.malyala@xilinx.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/inet.h>
+#include <linux/time.h>
+#include <linux/cdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <net/addrconf.h>
+#include <linux/types.h>
+#include "xcommon.h"
+
+/* TODO: Remove this macro once all the experimental code is verified;
+ * the non-experimental code paths should then be deleted.
+ */
+#define EXPERIMENTAL_CODE
+int debug;
+struct class *xrnic_class;
+/* Need to enable this using sysfs.*/
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none, 1=all)");
+
+#define XRNIC_REG_MAP_NODE 0
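+/*
+ * Open-coded 24-bit endian helper. As written it appears to produce a
+ * correct big-endian 24-bit value only for inputs below 256 (the low byte
+ * lands in the right lane); if larger 24-bit values such as PSNs are ever
+ * passed, a full swap along the lines of swab32((x)) >> 8 may be needed.
+ * This is an observation on the macro, not a verified hardware bug.
+ */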
+#define cpu_to_be24(x) ((x) << 16)
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+struct xrnic_dev_info *xrnic_dev;
+static dev_t xrnic_dev_number;
+
+/*
+ * To store the IP address of the controller, which is passed as a
+ * module param
+ */
+static char server_ip[16];
+/* To store the port number. This is passed as a module param */
+static unsigned short port_num;
+/* To store the mac_address. This is passed as a module param */
+static ushort mac_address[6] = {0x1, 0x0, 0x0, 0x35, 0x0a, 0x00};
+/* To store the ethernet interface name, which is passed as a module param */
+static char *ifname = "eth0";
+
+module_param(port_num, ushort, 0444);
+MODULE_PARM_DESC(port_num, "network port number");
+
+module_param_array(mac_address, ushort, NULL, 0444);
+MODULE_PARM_DESC(mac_address, "mac address");
+
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
+
+module_param(ifname, charp, 0444);
+MODULE_PARM_DESC(ifname, "Target server interface name, e.g. eth0");
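+
+/*
+ * Illustrative module load line (parameter names as declared above; the
+ * module name and values are placeholders, not a documented invocation):
+ *
+ *	insmod xernic.ko ifname=eth1 port_num=18515 \
+ *		server_ip=192.168.1.10 mac_address=0x1,0x0,0x0,0x35,0x0a,0x00
+ */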
+
+/**
+ * xrnic_rdma_create_id() - Creates an RDMA CM ID
+ * @xrnic_cm_handler: communication event handler
+ * @cm_context: CM context
+ * @ps: Port space
+ * @qp_type: Queue transport type
+ * @num_child: Max QP count
+ *
+ * @return: valid CM ID pointer on success, NULL or error pointer on failure
+ */
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type, int num_child)
+{
+ struct xrnic_qp_attr *qp1_attr = NULL;
+ struct xrnic_rdma_cm_id *cm_id = NULL;
+ struct xrnic_qp_info *qp_info = NULL;
+ struct xrnic_rdma_cm_id_info *cm_id_info = NULL;
+
+ if (!xrnic_dev) {
+ pr_err("Received NULL pointer\n");
+ return (struct xrnic_rdma_cm_id *)NULL;
+ }
+
+ qp1_attr = &xrnic_dev->qp1_attr;
+ if (xrnic_dev->io_qp_count < num_child ||
+ num_child < 0 || qp_type != qp1_attr->qp_type) {
+ pr_err("Invalid info received\n");
+ return NULL;
+ }
+
+ cm_id_info = kzalloc(sizeof(*cm_id_info), GFP_KERNEL);
+ if (!cm_id_info)
+ return ERR_PTR(-ENOMEM);
+
+ xrnic_dev->curr_cm_id_info = cm_id_info;
+ cm_id = (struct xrnic_rdma_cm_id *)&cm_id_info->parent_cm_id;
+ cm_id->xrnic_cm_handler = xrnic_cm_handler;
+ cm_id->cm_context = cm_context;
+ cm_id->ps = ps;
+ cm_id->qp_type = qp_type;
+ cm_id->cm_id_info = cm_id_info;
+ cm_id->child_qp_num = 0;
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+
+ qp_info = &cm_id->qp_info;
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->qp_num = qp1_attr->qp_num;
+ list_add_tail(&cm_id->list, &cm_id_list);
+
+ return cm_id;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_id);
+
+/**
+ * ipv6_addr_compare() - Compares two IPV6 addresses byte by byte
+ * @addr1: Address 1 to compare, stored in reversed byte order
+ * @addr2: Address 2 to compare, in network byte order
+ * @size: size of the addresses
+ *
+ * @return: 0 on a match, -1 in case of a mismatch
+ */
+static int ipv6_addr_compare(u8 *addr1, u8 *addr2, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (addr1[(size - 1) - i] != addr2[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * xrnic_rdma_bind_addr() - Binds an IP-V4/V6 address to the CM ID
+ * @cm_id: CM ID on which the address should be bound
+ * @addr: Address to bind to
+ * @port_num: Transport port number
+ * @ip_addr_type: IP-V4/V6
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Invalid CM ID or XRNIC device info\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (port_num < 1UL || port_num > XRNIC_MAX_PORT_SUPPORT)
+ return -XRNIC_INVALID_PORT;
+
+ if (cm_id->child_qp_num)
+ return -XRNIC_INVALID_CHILD_NUM;
+
+ if (xrnic_dev->cm_id_info[port_num - 1])
+ return -XRNIC_INVALID_PORT;
+
+ if (xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_CM_ID;
+
+ if (ip_addr_type == AF_INET6) {
+ if (ipv6_addr_compare((u8 *)&xrnic_dev->ipv6_addr, addr,
+ sizeof(struct in6_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in6_addr));
+ } else if (ip_addr_type == AF_INET) {
+ if (memcmp(&xrnic_dev->ipv4_addr, addr,
+ sizeof(struct in_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in_addr));
+ } else {
+ return -XRNIC_INVALID_ADDR_TYPE;
+ }
+ xrnic_dev->cm_id_info[port_num - 1] = cm_id->cm_id_info;
+ cm_id->port_num = port_num;
+ cm_id->route.ip_addr_type = ip_addr_type;
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_bind_addr);
+
+/**
+ * xrnic_rdma_listen() - Initiates listen on the socket
+ * @cm_id: CM ID
+ * @backlog: backlog count (currently unused by this implementation)
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int backlog)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Rx invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return XRNIC_INVALID_CM_ID;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] ==
+ XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_PORT;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_QP_ID;
+
+ xrnic_dev->port_status[cm_id->port_num - 1] = XRNIC_PORT_QP_IN_USE;
+ xrnic_dev->curr_cm_id_info = NULL;
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_listen);
+
+/**
+ * xrnic_hw_hs_reset_sq_cq() - Enables HW Handshake for a given QP
+ * @qp_info: QP which should be enabled for HW Handshake
+ * @hw_hs_info: HW Handshake info with which QP config needs to be updated
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_sq_cq_ptr(qp_attr, hw_hs_info);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_sq_cq);
+
+/**
+ * xrnic_hw_hs_reset_rq() - Updates HW handshake for RQ
+ * @qp_info: QP which should be enabled for HW Handshake
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_rq_ptr(qp_attr);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_rq);
+
+/**
+ * set_ipv4_ipaddress() - Configures XRNIC IP address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv4_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ u32 ipv4_addr = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+ struct in_device *inet_dev;
+
+	if (!dev) {
+		pr_err("CMAC interface not configured\n");
+		return XRNIC_FAILED;
+	}
+
+	/* Dereference dev only after the NULL check above. */
+	inet_dev = (struct in_device *)dev->ip_ptr;
+
+	if (inet_dev->ifa_list) {
+ ipv4_addr = inet_dev->ifa_list->ifa_address;
+ if (!ipv4_addr) {
+ pr_err("cmac ip addr: ifa_address not available\n");
+ return XRNIC_FAILED;
+ }
+ snprintf(server_ip, 16, "%pI4", &ipv4_addr);
+ in4_pton(server_ip, strlen(server_ip), xrnic_dev->ipv4_addr,
+ '\0', NULL);
+ DEBUG_LOG("xcmac ip_address:%s\n", server_ip);
+ } else {
+ pr_info("xcmac ip address: not available at present\n");
+ return 0;
+ }
+
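+	/*
+	 * Map the interface MTU onto the nearest ERNIC path-MTU setting.
+	 * The unusual thresholds (340, 592, 2200, 4200) appear to budget
+	 * for RoCEv2 headers and the invariant CRC on top of the payload;
+	 * anything else falls back to the 4096-byte PMTU.
+	 */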
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv4_addr[3] << 0) |
+ (xrnic_dev->ipv4_addr[2] << 8) |
+ (xrnic_dev->ipv4_addr[1] << 16) |
+ (xrnic_dev->ipv4_addr[0] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->ipv4_address)));
+ DEBUG_LOG("XRNIC IPV4 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * set_ipv6_ipaddress() - Configures XRNIC IPV6 address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv6_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u8 i, ip6_set = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ idev = __in6_dev_get(dev);
+ if (!idev) {
+ pr_err("ipv6 inet device not found\n");
+ return 0;
+ }
+
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
+ DEBUG_LOG("IP=%pI6, MAC=%pM\n", &ifp->addr, dev->dev_addr);
+ for (i = 0; i < 16; i++) {
+ DEBUG_LOG("IP=%x\n", ifp->addr.s6_addr[i]);
+ xrnic_dev->ipv6_addr[15 - i] = ifp->addr.s6_addr[i];
+ }
+ ip6_set = 1;
+ }
+ if (ip6_set == 0) {
+ pr_info("xcmac ipv6 address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv6_addr[0] << 0) |
+ (xrnic_dev->ipv6_addr[1] << 8) |
+ (xrnic_dev->ipv6_addr[2] << 16) |
+ (xrnic_dev->ipv6_addr[3] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr1)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[4] << 0) |
+ (xrnic_dev->ipv6_addr[5] << 8) |
+ (xrnic_dev->ipv6_addr[6] << 16) |
+ (xrnic_dev->ipv6_addr[7] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr2)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[8] << 0) |
+ (xrnic_dev->ipv6_addr[9] << 8) |
+ (xrnic_dev->ipv6_addr[10] << 16) |
+ (xrnic_dev->ipv6_addr[11] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr3)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[12] << 0) |
+ (xrnic_dev->ipv6_addr[13] << 8) |
+ (xrnic_dev->ipv6_addr[14] << 16) |
+ (xrnic_dev->ipv6_addr[15] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr4)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * cmac_inet6addr_event() - Handles IPV6 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Driver link down\r\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Driver link up ipv6\r\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Driver link change address ipv6\r\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * cmac_inetaddr_event() - Handles IPV4 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ struct in_ifaddr *ifa = data;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ if (event_netdev != dev)
+ return 0;
+ pr_info("Xrnic: event = %ld\n", event);
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Xrnic: link down\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Xrnic: link up\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Xrnic: ip address change detected\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+struct notifier_block cmac_inetaddr_notifier = {
+ .notifier_call = cmac_inetaddr_event
+};
+
+struct notifier_block cmac_inet6addr_notifier = {
+ .notifier_call = cmac_inet6addr_event
+};
+
+static const struct file_operations xrnic_fops = {
+ /*TODO: Implement read/write/ioctl operations. */
+ .owner = THIS_MODULE, /* Owner */
+};
+
+/**
+ * xrnic_irq_handler() - XRNIC interrupt handler
+ * @irq: Irq number
+ * @data: Pointer to XRNIC device info structure
+ *
+ * @return: IRQ_HANDLED in case of success or other value in case of failure
+ */
+static irqreturn_t xrnic_irq_handler(int irq, void *data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ config_value = ioread32((void *)&xrnic_ctrl_config->intr_sts);
+
+	/* Consider only interrupt bits that are currently enabled. */
+ config_value = config_value & xrnic_dev->xrnic_mmap.intr_en;
+ if (!config_value)
+ pr_err("Rx disabled or masked interrupt\n");
+
+ if (config_value & PKT_VALID_ERR_INTR_EN) {
+ pr_info("Packet validation fail interrupt rx\n");
+ iowrite32(PKT_VALID_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & MAD_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("MAD Packet rx interrupt\n");
+ /* Clear the interrupt */
+ iowrite32(MAD_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ /* process the MAD pkt */
+ tasklet_schedule(&xrnic_dev->mad_pkt_recv_task);
+ }
+
+ if (config_value & BYPASS_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Bypass packet Rx interrupt\n");
+ iowrite32(BYPASS_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & RNR_NACK_GEN_INTR_EN) {
+ DEBUG_LOG("Rx RNR Nack interrupt\n");
+ iowrite32(RNR_NACK_GEN_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & WQE_COMPLETED_INTR_EN) {
+ DEBUG_LOG("Rx WQE completion interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~WQE_COMPLETED_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->wqe_completed_task);
+ }
+
+ if (config_value & ILL_OPC_SENDQ_INTR_EN) {
+ DEBUG_LOG("Rx illegal opcode interrupt\n");
+ iowrite32(ILL_OPC_SENDQ_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & QP_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Rx data packet interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~QP_PKT_RCVD_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->qp_pkt_recv_task);
+ }
+
+ if (config_value & FATAL_ERR_INTR_EN) {
+ pr_info("Rx Fatal error interrupt\n");
+
+ iowrite32(FATAL_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+		/* 0 is a placeholder value */
+ xrnic_qp_fatal_handler(0);
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xrnic_ctrl_hw_configuration() - Xrnic control configuration initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct net_device *dev = NULL;
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+
+ if (!xrnic_dev || !xrnic_dev->xrnic_mmap.xrnic_regs ||
+ !xrnic_ctrl_conf) {
+ pr_err("Invalid device pointers\n");
+ return -EINVAL;
+ }
+
+ xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev) {
+ pr_err("Ethernet mac address not configured\n");
+ return XRNIC_FAILED;
+ }
+ /* Set the MAC address */
+ config_value = dev->dev_addr[5] | (dev->dev_addr[4] << 8) |
+ (dev->dev_addr[3] << 16) | (dev->dev_addr[2] << 24);
+ DEBUG_LOG("Source MAC address LSB [%x]\n", config_value);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_lsb)));
+
+ config_value = dev->dev_addr[1] | (dev->dev_addr[0] << 8);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_msb)));
+ DEBUG_LOG("Source MAC address MSB [%x]\n", config_value);
+
+ if (set_ipv4_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ if (set_ipv6_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET6 address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+	/* At present 128 TX headers, each 128 bytes in size */
+ config_value = xrnic_mmap->tx_hdr_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_ba)));
+ DEBUG_LOG("Tx header buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_HDR | (XRNIC_SIZE_OF_TX_HDR << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_sz)));
+ DEBUG_LOG("Tx header buf size [0x%x]\n", config_value);
+
+	/* At present 256 TX SGLs, each 16 bytes in size */
+ config_value = xrnic_mmap->tx_sgl_buf_ba_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_ba)));
+ DEBUG_LOG("Tx SGL buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_SGL | (XRNIC_SIZE_OF_TX_SGL << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_sz)));
+ DEBUG_LOG("Tx SGL buf size [0x%x]\n", config_value);
+
+	/* At present 32 bypass buffers, each 512 bytes in size */
+ config_value = xrnic_mmap->bypass_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_ba)));
+ DEBUG_LOG("Bypass buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_BYPASS_BUF |
+ (XRNIC_SIZE_OF_BYPASS_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_sz)));
+ DEBUG_LOG("Bypass buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_BYPASS_BUF_WRPTR;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->bypass_buf_wrptr)));
+ DEBUG_LOG("Bypass buffer write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->err_pkt_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_ba)));
+ DEBUG_LOG("Error packet buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_ERROR_BUF |
+ (XRNIC_SIZE_OF_ERROR_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_sz)));
+ DEBUG_LOG("Error packet buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_ERROR_BUF_WRPTR;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_buf_wrptr)));
+	DEBUG_LOG("Error packet buf write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->out_errsts_q_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_ba)));
+ DEBUG_LOG("Outgoing error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_sz)));
+ DEBUG_LOG("Outgoing error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->in_errsts_q_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_ba)));
+ DEBUG_LOG("Incoming error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_sz)));
+ DEBUG_LOG("Incoming error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->data_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_ba)));
+ DEBUG_LOG("RDMA Outgoing data buf base addr [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_DATA_BUF | (XRNIC_SIZE_OF_DATA_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_sz)));
+ DEBUG_LOG("RDMA Outgoing data buf size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->resp_err_pkt_buf_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_pkt_buf_ba)));
+ DEBUG_LOG("Response error packet buf base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_NUM_OF_RESP_ERR_BUF |
+ (XRNIC_SIZE_OF_RESP_ERR_BUF << 16);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_buf_sz)));
+ DEBUG_LOG("Response error packet buf size [0x%x]\n", config_value);
+
+ /* Enable the RNIC configuration*/
+ config_value = (XRNIC_CONFIG_XRNIC_EN |
+ XRNIC_CONFIG_ERR_BUF_EN |
+ XRNIC_CONFIG_NUM_QPS_ENABLED |
+ XRNIC_CONFIG_FLOW_CONTROL_EN |
+ XRNIC_CONFIG_UDP_SRC_PORT);
+
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->xrnic_conf)));
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_ctrl_hw_init() - Xrnic control and QP hardware initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_init(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ int ret = 0, i;
+
+ /* Invoking rnic global initialization configuration */
+ ret = xrnic_ctrl_hw_configuration();
+ if (ret) {
+ pr_err("xrnic hw config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+ /* Invoking RDMA QP1 configuration */
+ ret = xrnic_qp1_hw_configuration();
+ if (ret) {
+ pr_err("xrnic qp1 config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+	/* Invoke the remaining data-path QP configuration. No return value
+	 * is checked, as we do not register any data-path interrupt handler.
+	 */
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++)
+ xrnic_qp_hw_configuration(i);
+
+ /* Enabling xrnic interrupts. */
+ config_value = MAD_PKT_RCVD_INTR_EN |
+ RNR_NACK_GEN_INTR_EN |
+ WQE_COMPLETED_INTR_EN | ILL_OPC_SENDQ_INTR_EN |
+ QP_PKT_RCVD_INTR_EN | FATAL_ERR_INTR_EN;
+
+ if (config_value & ~XRNIC_INTR_ENABLE_DEFAULT) {
+ DEBUG_LOG("Setting the default interrupt enable config\n");
+ config_value = XRNIC_INTR_ENABLE_DEFAULT;
+ }
+
+ /*Writing to interrupt enable register.*/
+ xrnic_dev->xrnic_mmap.intr_en = config_value;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->intr_en)));
+
+ DEBUG_LOG("Interrupt enable reg value [%#x]\n",
+ ioread32((void __iomem *)&xrnic_ctrl_config->intr_en));
+ return ret;
+}
+
+/**
+ * xrnic_fill_wr() - Fills the send queue work request entries
+ * @qp_attr: QP config info used to fill the WRs
+ * @qp_depth: Depth of the Queue
+ */
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth)
+{
+ int i;
+ struct wr *sq_wr; /*sq_ba*/
+
+ for (i = 0; i < qp_depth; i++) {
+ sq_wr = (struct wr *)qp_attr->sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->local_offset[0] = (qp_attr->send_sgl_phys & 0xffffffff)
+ + (i * XRNIC_SEND_SGL_SIZE);
+ sq_wr->local_offset[1] = 0;
+ sq_wr->length = XRNIC_SEND_SGL_SIZE;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ sq_wr->remote_offset[0] = 0;
+ sq_wr->remote_offset[1] = 0;
+ sq_wr->remote_tag = 0;
+ }
+}
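+
+/*
+ * Illustrative layout produced by xrnic_fill_wr() (an inference from the
+ * code above, not documented behaviour): the send queue is an array of
+ * 64-byte struct wr entries, each pre-pointed at its own slice of the
+ * contiguous SGL buffer:
+ *
+ *	sq_ba:    [ wr 0 ][ wr 1 ][ wr 2 ] ...
+ *	send_sgl: [ sgl slice 0 ][ sgl slice 1 ] ...
+ *	wr[i].local_offset[0] = send_sgl_phys + i * XRNIC_SEND_SGL_SIZE
+ *
+ * so a completion carrying wr_id i identifies which SGL slice is free to
+ * reuse.
+ */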
+
+static int xernic_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *np = NULL;
+ struct resource resource;
+ void __iomem *virt_addr;
+ u64 start_addr;
+ int status;
+ int len;
+/* TODO: Not using pdev. Rather using a global data structure xrnic_dev,
+ * which is shared among all the objects in ernic driver.
+ * Need to set platform private data as xrnic_dev and all the objects of
+ * ernic driver has to retrieve from platform_device pointer.
+ */
+#ifdef EXPERIMENTAL_CODE
+ int val = 0;
+#endif
+ phys_addr_t phy_addr;
+
+ pr_info("XRNIC driver Version = %s\n", XRNIC_VERSION);
+
+ register_inetaddr_notifier(&cmac_inetaddr_notifier);
+ register_inet6addr_notifier(&cmac_inet6addr_notifier);
+ init_mr(MEMORY_REGION_BASE, MEMORY_REGION_LEN);
+
+ np = of_find_node_by_name(NULL, "ernic");
+ if (!np) {
+ pr_err("xrnic can't find compatible node in device tree.\n");
+ return -ENODEV;
+ }
+
+ xrnic_dev = kzalloc(sizeof(*xrnic_dev), GFP_KERNEL);
+ if (!xrnic_dev)
+ return -ENOMEM;
+ ret = alloc_chrdev_region(&xrnic_dev_number, 0,
+ NUM_XRNIC_DEVS, DEVICE_NAME);
+ if (ret) {
+ DEBUG_LOG("XRNIC:: Failed to register char device\n");
+ goto alloc_failed;
+ } else {
+		DEBUG_LOG(KERN_INFO "XRNIC registered with:\n");
+ DEBUG_LOG(KERN_INFO "Major : %u || ", MAJOR(xrnic_dev_number));
+ DEBUG_LOG(KERN_INFO "Minor : %u\n", MINOR(xrnic_dev_number));
+ }
+/* TODO: xrnic_class is created but not used. Need to enable debug and
+ * statistic counters through this interface.
+ */
+ xrnic_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(xrnic_class)) {
+ ret = PTR_ERR(xrnic_class);
+ goto class_failed;
+ }
+
+ /* Connect the file operations with the cdev */
+ /* TODO: cdev created but not used. Need to implement when
+ * userspace applications are implemented. Currently all the
+ * callbacks in xrnic_fops are dummy.
+ */
+ cdev_init(&xrnic_dev->cdev, &xrnic_fops);
+ xrnic_dev->cdev.owner = THIS_MODULE;
+
+ /* Connect the major/minor number to the cdev */
+ ret = cdev_add(&xrnic_dev->cdev, xrnic_dev_number, 1);
+	if (ret) {
+ DEBUG_LOG("ERROR: XRNIC cdev allocation failed\n");
+ goto cdev_failed;
+ }
+
+ device_create(xrnic_class, NULL, xrnic_dev_number, NULL,
+ "%s", "xrnic0");
+
+	/* Resource 0 of the node: xrnic register map at 0x0 0x84000000, length 128K */
+ ret = of_address_to_resource(np, XRNIC_REG_MAP_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 0.\n");
+ goto dev_failed;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_REG_MAP_NODE);
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.xrnic_regs_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.xrnic_regs = (struct xrnic_reg_map *)virt_addr;
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx HDR BUF Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX HEADERS 0x20100000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_HDR_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 5.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_HDR_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory TX header 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx SGL Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX DMA SGL 0xB4000000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_SGL_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 6.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_SGL_BUF_NODE);
+ DEBUG_LOG("xrnic memory TX SGL 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba = (void *)(uintptr_t)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.bypass_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Bypass Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+#else
+ /*Mapping for Xrnic BYPASS PL 0x20120000 to 16 kb.*/
+ /*Mapping for Xrnic BYPASS PS 0x20120000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_BYPASS_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 7.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_BYPASS_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory BYPASS:0x%llx of siz:%xb mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_ERROR_BUF * XRNIC_SIZE_OF_ERROR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.err_pkt_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory ERR PKT Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+#else
+ /*Mapping for Xrnic ERROR-DROPP PL 0x20110000 to 16 kb.*/
+ /*Mapping for Xrnic ERROR-DROPP PS 0x20110000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_ERRPKT_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 8.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_ERRPKT_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory ERROR PKT 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.out_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory OUT ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic OUT ERR_STS 0x29000000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_OUTERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 9.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_OUTERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.in_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory IN ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic IN ERR_STS PL 0x29001000 to 16 kb.*/
+ /*Mapping for Xrnic IN ERR_STS PS 0x29001000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_INERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 10.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_INERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba = (void *)virt_addr;
+#endif
+
+ /*Mapping for Xrnic RQ WR DBRL PL 0x29002000 to 4 kb.*/
+ /*Mapping for Xrnic RQ WR DBRL PS 0x29002000 to 4 kb.*/
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_DATA_BUF * XRNIC_SIZE_OF_DATA_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.data_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RQ STATUS PER QP 0x29040000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_DATA_BUF_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_DATA_BUF_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory DATA BUFF BA 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_RESP_ERR_BUF * XRNIC_SIZE_OF_RESP_ERR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RQ STATUS PER QP 0x20130000 to 16kb.*/
+ ret = of_address_to_resource(np, XRNIC_RESP_ERR_PKT_BUF_BA, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RESP_ERR_PKT_BUF_BA);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic response error packet buffer base address [0x%llx]",
+ start_addr);
+ DEBUG_LOG(" of size=%x bytes mapped at 0x%p\n",
+ (u32)resource.end - (u32)resource.start, virt_addr);
+
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_SGL_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.send_sgl_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.send_sgl =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.send_sgl, 0, len);
+ DEBUG_LOG("xrnic memory Send SGL Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.send_sgl_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_SEND_SGL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 1.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SEND_SGL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+
+ DEBUG_LOG("xrnic memory send sgl 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.send_sgl_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.send_sgl = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+ DEBUG_LOG("send SGL physical address :%llx\n",
+ xrnic_dev->xrnic_mmap.send_sgl_phys);
+ DEBUG_LOG("xrnic mmap:%p\n", &xrnic_dev->xrnic_mmap);
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SQ_DEPTH * sizeof(struct xrnic_cqe);
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.cq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.cq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.cq_ba, 0, len);
+ DEBUG_LOG("xrnic memory CQ BA Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.cq_ba_phys);
+
+#else
+ ret = of_address_to_resource(np, XRNIC_CQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 2.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_CQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory send CQ 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.cq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.cq_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_RECV_PKT_SIZE * XRNIC_RQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.rq_buf_ba_ca, 0, len);
+ DEBUG_LOG("xrnic memory Receive Q Buffer = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_RQ_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 3.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQ_BUF_NODE);
+ memset_io(virt_addr, 0, resource_size(&resource));
+ DEBUG_LOG("xrnic memory receive Q Buf 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_ba, 0, len);
+ DEBUG_LOG("xrnic memory Send Q Base Addr = %#x, %llx.\n",
+ val, xrnic_dev->xrnic_mmap.sq_ba_phys);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 4.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_BA_NODE);
+ memset_io(virt_addr, 0, resource_size(&resource));
+ DEBUG_LOG("xrnic memory SEND Q 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_ba = (struct wr *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.rq_wrptr_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_RQWR_PTR_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 11.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQWR_PTR_NODE);
+ memset_io(virt_addr, 0, resource_size(&resource));
+ DEBUG_LOG("xrnic memory RQ WPTR 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_cmpl_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_CMPL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 12.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_CMPL_NODE);
+ memset_io(virt_addr, 0, resource_size(&resource));
+ DEBUG_LOG("xrnic memory SQ CMPL 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG("bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.stat_rq_buf_ca, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE,
+ &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 13.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE);
+ memset_io(virt_addr, 0, resource_size(&resource));
+ DEBUG_LOG("xrnic memory STAT RQ BUF 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca = (void *)virt_addr;
+#endif
+ xrnic_dev->io_qp_count = XRNIC_MAX_QP_SUPPORT;
+ /* XRNIC controller H/W configuration which includes XRNIC
+ * global configuration, QP1 initialization and interrupt enable.
+ */
+ ret = xrnic_ctrl_hw_init();
+ if (ret < 0) {
+ pr_err("xrnic hw init failed.\n");
+ goto mem_config_err;
+ }
+ /* TODO: Currently, the ERNIC IP exports 8 interrupt lines in the DTS,
+ * but the IP asserts only the first of them: internally, all 8 lines
+ * are logically ORed and raised as a single interrupt, with an
+ * interrupt status register indicating which line fired. So we parse
+ * only the 0th index of irq_map from the DTS, and the interrupt
+ * handler routine reads the status register to identify the asserted
+ * interrupt (see the illustrative sketch after this function).
+ *
+ * Need to fix the design to export only 1 interrupt line in the DTS.
+ */
+ xrnic_dev->xrnic_irq = irq_of_parse_and_map(np, 0);
+ if (!xrnic_dev->xrnic_irq) {
+ pr_err("xrnic can't determine irq.\n");
+ ret = XRNIC_FAILED;
+ goto mem_config_err;
+ }
+ status = request_irq(xrnic_dev->xrnic_irq, xrnic_irq_handler, 0,
+ "xrnic_irq", xrnic_dev);
+ if (status) {
+ pr_err("XRNIC irq request handler failed\n");
+ goto err_irq;
+ }
+
+ tasklet_init(&xrnic_dev->mad_pkt_recv_task,
+ xrnic_mad_pkt_recv_intr_handler,
+ (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_pkt_recv_task,
+ xrnic_qp_pkt_recv_intr_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_fatal_task,
+ xrnic_qp_fatal_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->wqe_completed_task,
+ xrnic_wqe_completed_intr_handler,
+ (unsigned long)xrnic_dev);
+ INIT_LIST_HEAD(&cm_id_list);
+
+ return XRNIC_SUCCESS;
+err_irq:
+mem_config_err:
+/* free_mem() operates only on valid physical addresses returned by
+ * alloc_mem() and ignores NULL or invalid addresses. So even if any of
+ * the above allocations fails midway, it is safe to call free_mem() on
+ * all the addresses below.
+ *
+ * Carve-out memory is used for the ERNIC requirements, so devm_kzalloc()
+ * cannot be used: the kernel cannot see these memories until they are
+ * ioremapped.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+dev_failed:
+ /* Remove the cdev */
+ cdev_del(&xrnic_dev->cdev);
+
+ /* Remove the device node entry */
+ device_destroy(xrnic_class, xrnic_dev_number);
+
+cdev_failed:
+ /* Destroy xrnic_class */
+ class_destroy(xrnic_class);
+
+class_failed:
+ /* Release the major number */
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+
+alloc_failed:
+ kfree(xrnic_dev);
+ return ret;
+}
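+
+/*
+ * Illustrative sketch (editorial addition, not part of this patch): the
+ * TODO in xernic_probe() notes that all 8 ERNIC interrupt lines are ORed
+ * into line 0 and demultiplexed in software. Assuming a hypothetical
+ * status/clear register pair and the interrupt-enable bit names used
+ * elsewhere in this driver, the top-level handler would take roughly
+ * this shape:
+ *
+ *	static irqreturn_t demo_xrnic_irq_handler(int irq, void *dev_id)
+ *	{
+ *		struct xrnic_dev_info *dev = dev_id;
+ *		u32 sts = ioread32(intr_sts_reg);	// which source fired?
+ *
+ *		if (sts & MAD_PKT_RCVD_INTR_EN)
+ *			tasklet_schedule(&dev->mad_pkt_recv_task);
+ *		if (sts & QP_PKT_RCVD_INTR_EN)
+ *			tasklet_schedule(&dev->qp_pkt_recv_task);
+ *		if (sts & WQE_COMPLETED_INTR_EN)
+ *			tasklet_schedule(&dev->wqe_completed_task);
+ *		iowrite32(sts, intr_clear_reg);		// ack serviced sources
+ *		return IRQ_HANDLED;
+ *	}
+ */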
+
+static int xernic_remove(struct platform_device *pdev)
+{
+/* TODO: pdev is unused here; the driver instead uses the global
+ * xrnic_dev structure, which is shared among all objects in the ERNIC
+ * driver. This should be reworked to derive xrnic_dev from the
+ * platform_device pointer.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+ cdev_del(&xrnic_dev->cdev);
+ device_destroy(xrnic_class, xrnic_dev_number);
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+ free_irq(xrnic_dev->xrnic_irq, xrnic_dev);
+ kfree(xrnic_dev);
+ class_destroy(xrnic_class);
+ unregister_inetaddr_notifier(&cmac_inetaddr_notifier);
+ unregister_inet6addr_notifier(&cmac_inet6addr_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id xernic_of_match[] = {
+ { .compatible = "xlnx,ernic-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xernic_of_match);
+
+static struct platform_driver xernic_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xernic_of_match,
+ },
+ .probe = xernic_probe,
+ .remove = xernic_remove,
+};
+
+module_platform_driver(xernic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xilinx RNIC driver");
+MODULE_AUTHOR("Sandeep Dhanvada");
diff --git a/drivers/staging/xlnx_ernic/xmain.h b/drivers/staging/xlnx_ernic/xmain.h
new file mode 100644
index 000000000000..2f45f94d2f85
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XLNX_MAIN_H_
+#define _XLNX_MAIN_H_
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#define XRNIC_VERSION "1.2"
+#define NUM_XRNIC_DEVS 1
+#define DEVICE_NAME "xrnic"
+#define DRIVER_NAME "xrnic"
+
+int xrnic_open(struct inode *inode, struct file *file);
+int xrnic_release(struct inode *inode, struct file *file);
+long xrnic_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ssize_t xrnic_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
+ssize_t xrnic_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos);
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/staging/xlnx_ernic/xmr.c b/drivers/staging/xlnx_ernic/xmr.c
new file mode 100644
index 000000000000..4959595d48d0
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory registrations helpers for RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+#include "xhw_config.h"
+
+struct list_head mr_free;
+struct list_head mr_alloc;
+
+atomic_t pd_index = ATOMIC_INIT(0);
+int free_mem_ceil;
+int free_mem_remain;
+void __iomem *mtt_va;
+
+DECLARE_BITMAP(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+/**
+ * alloc_pool_remove() - removes an entry from the alloc pool
+ * @chunk: memory region to be removed from the alloc pool.
+ * @return: 0 on success.
+ *
+ * TODO: Change the return type to void and drop the return statement.
+ */
+int alloc_pool_remove(struct mr *chunk)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (next->paddr == chunk->paddr) {
+ __list_del_entry(&next->list);
+ free_mem_remain += chunk->len;
+ }
+ }
+ return 0;
+}
+
+/**
+ * free_pool_insert() - inserts specified memory region in the free pool
+ * @chunk: memory region to be inserted in free pool.
+ * @return: 0 on success. else, returns -ENOMEM.
+ *
+ * Adds the specified memory to the free pool and if possible,
+ * merges it with adjacent regions in free pool.
+ */
+int free_pool_insert(struct mr *chunk)
+{
+ struct mr *next, *dup, *tmp;
+ struct mr *prev = NULL;
+
+ dup = kzalloc(sizeof(*dup), GFP_ATOMIC);
+ if (!dup)
+ return -ENOMEM;
+ memcpy(dup, chunk, sizeof(*dup));
+
+ /* If list is empty, then, add the new region to the free pool */
+ if (list_empty(&mr_free)) {
+ list_add_tail(&dup->list, &mr_free);
+ goto done;
+ }
+
+ /* If the new region size exceeds the free memory limit,
+ * return error.
+ */
+ if (free_mem_ceil < (free_mem_remain + dup->len)) {
+ kfree(dup);
+ return -ENOMEM;
+ }
+
+ /* For a non-empty list, add the region at a suitable place
+ * in the free pool.
+ */
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (dup->paddr < next->paddr) {
+ prev = list_prev_entry(next, list);
+ list_add(&dup->list, &prev->list);
+ goto merge_free_pool;
+ }
+ }
+ /*
+ * If no suitable position to insert within free pool, then,
+ * append at the tail.
+ */
+ list_add_tail(&dup->list, &mr_free);
+
+ /* If possible, merge the region with previous and next regions. */
+merge_free_pool:
+ if (next && (dup->paddr + dup->len == next->paddr)) {
+ dup->len += next->len;
+ __list_del_entry(&next->list);
+ }
+
+ if (prev && (prev->paddr + prev->len == dup->paddr)) {
+ prev->len += dup->len;
+ __list_del_entry(&dup->list);
+ }
+ /* Except the physical and virtual addresses, clear all contents of
+ * the region; if this region is in the alloc pool, remove it from
+ * there.
+ */
+done:
+ dup->lkey = 0;
+ dup->rkey = 0;
+ dup->vaddr = 0;
+ dup->access = MR_ACCESS_RESVD;
+ alloc_pool_remove(chunk);
+ return 0;
+}
+EXPORT_SYMBOL(free_pool_insert);
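+
+/*
+ * Worked example (editorial addition): suppose the free pool holds
+ * regions [0x1000, len 0x100) and [0x1200, len 0x100), and the chunk
+ * [0x1100, len 0x100) is inserted. The placement loop puts it just
+ * before 0x1200; the first merge folds 0x1200 into it (len 0x200), and
+ * the second merge folds the result into 0x1000, leaving one free
+ * region [0x1000, len 0x300).
+ */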
+
+/**
+ * alloc_pd() - Allocates a Protection Domain
+ * @return: returns pointer to ernic_pd struct.
+ *
+ */
+struct ernic_pd *alloc_pd(void)
+{
+ struct ernic_pd *new_pd;
+
+ new_pd = kzalloc(sizeof(*new_pd), GFP_ATOMIC);
+ if (!new_pd)
+ return NULL;
+ atomic_inc(&pd_index);
+ atomic_set(&new_pd->id, atomic_read(&pd_index));
+ return new_pd;
+}
+EXPORT_SYMBOL(alloc_pd);
+
+/**
+ * dealloc_pd() - De-allocates a Protection Domain
+ * @pd: protection domain to be deallocated.
+ *
+ */
+void dealloc_pd(struct ernic_pd *pd)
+{
+ atomic_dec(&pd_index);
+ kfree(pd);
+}
+EXPORT_SYMBOL(dealloc_pd);
+
+/**
+ * dereg_mr() - deregisters the memory region from the Channel adapter.
+ * @mr: memory region to be de-registered.
+ *
+ * dereg_mr() de-registers a memory region with CA and clears the memory region
+ * registered with CA.
+ */
+void dereg_mr(struct mr *mr)
+{
+ int mtt_idx = (mr->rkey & 0xFF);
+
+ //memset(mtt_va + mtt_offset, 0, sizeof(struct ernic_mtt));
+ clear_bit(mtt_idx, ernic_memtable);
+}
+EXPORT_SYMBOL(dereg_mr);
+
+/**
+ * alloc_mem() - Allocates a Memory Region
+ * @pd: Protection domain mapped to the memory region
+ * @len: Length of the memory region required
+ * @return: on success, returns the physical address.
+ * else, returns -ENOMEM.
+ */
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len)
+{
+ struct mr *next, *new_alloc, *new_free, *tmp;
+ int _len;
+
+ _len = round_up(len, 256);
+ new_alloc = kzalloc(sizeof(*new_alloc), GFP_KERNEL);
+ new_free = kzalloc(sizeof(*new_free), GFP_KERNEL);
+
+ /* requested more memory than the free pool capacity? */
+ if (free_mem_remain < _len)
+ goto err;
+
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (next->len == _len) {
+ new_alloc->paddr = next->paddr;
+ __list_del_entry(&next->list);
+ goto reg_mr;
+ }
+ if (next->len > _len) {
+ __list_del_entry(&next->list);
+ new_alloc->paddr = next->paddr;
+ new_free->paddr = next->paddr + _len;
+ new_free->len = next->len - _len;
+ free_pool_insert(new_free);
+ goto reg_mr;
+ }
+ }
+
+err:
+ /* No free memory of requested size */
+ kfree(new_alloc);
+ kfree(new_free);
+
+ return -ENOMEM;
+reg_mr:
+ free_mem_remain = free_mem_remain - _len;
+ new_alloc->pd = pd;
+ new_alloc->len = _len;
+ new_alloc->vaddr = (u64)(uintptr_t)ioremap(new_alloc->paddr, _len);
+ list_add_tail(&new_alloc->list, &mr_alloc);
+ return new_alloc->paddr;
+}
+EXPORT_SYMBOL(alloc_mem);
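+
+/*
+ * Usage sketch (editorial addition; the surrounding driver uses the same
+ * pattern in xernic_probe()): a request for 1000 bytes reserves a
+ * 1024-byte region because of the 256-byte rounding above.
+ *
+ *	phys_addr_t pa = alloc_mem(pd, 1000);	// reserves 1024 bytes
+ *
+ *	if (pa == -ENOMEM)
+ *		return -ENOMEM;
+ *	memset((void *)(uintptr_t)get_virt_addr(pa), 0, 1024);
+ *	...
+ *	free_mem(pa);	// returns the region to the free pool
+ */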
+
+u64 get_virt_addr(phys_addr_t phys_addr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ return next->vaddr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(get_virt_addr);
+
+/**
+ * free_mem() - inserts a memory region in free pool and
+ * removes from alloc pool
+ * @paddr: physical address to be freed.
+ *
+ */
+void free_mem(phys_addr_t paddr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == paddr)
+ goto found;
+ }
+ return;
+found:
+ iounmap((void __iomem *)(unsigned long)next->vaddr);
+ free_pool_insert(next);
+}
+EXPORT_SYMBOL(free_mem);
+
+/**
+ * register_mem_to_ca() - Registers a memory region with the Channel Adapter
+ * @mr: memory region to register.
+ * @return: a pointer to struct mr
+ *
+ * register_mem_to_ca() validates the memory region provided and registers
+ * the memory region with the CA and updates the mkey in the registered region.
+ *
+ */
+static struct mr *register_mem_to_ca(struct mr *mr)
+{
+ int bit, mtt_idx;
+ void __iomem *base;
+ struct ernic_mtt mtt;
+
+ bit = find_first_zero_bit(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+ set_bit(bit, ernic_memtable);
+ mtt_idx = bit;
+ mtt.pa = mr->paddr;
+ mtt.iova = mr->vaddr;
+ mtt.pd = atomic_read(&mr->pd->id);
+ mr->rkey = (mtt_idx << 8) | bit;
+ mtt.rkey = mr->rkey;
+ mtt.access = mr->access;
+ mtt.len = mr->len;
+ /* Point at this index's MTT slot without truncating the pointer. */
+ base = mtt_va + (mtt_idx * 0x100);
+
+ iowrite32(mtt.pd, base + ERNIC_PD_OFFSET);
+ iowrite32((mtt.iova & 0xFFFFFFFF), base + ERNIC_IOVA_OFFSET);
+ iowrite32(((mtt.iova >> 32) & 0xFFFFFFFF),
+ base + ERNIC_IOVA_OFFSET + 4);
+ iowrite32((mtt.pa & 0xFFFFFFFF), base + ERNIC_PA_OFFSET);
+ iowrite32(((mtt.pa >> 32) & 0xFFFFFFFF), base + ERNIC_PA_OFFSET + 4);
+ iowrite32((mtt.rkey & 0xFFFF), base + ERNIC_RKEY_OFFSET);
+ iowrite32(mtt.len, base + ERNIC_LEN_OFFSET);
+ iowrite32(mtt.access, base + ERNIC_ACCESS_OFFSET);
+ return mr;
+}
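+
+/*
+ * Editorial note: each MTT entry occupies a 0x100-byte slot at
+ * mtt_va + (mtt_idx * 0x100), with 32-bit writes laid out per the
+ * ERNIC_*_OFFSET macros in xmr.h:
+ *
+ *	+0x00 pd	+0x04 iova (64-bit)	+0x0C pa (64-bit)
+ *	+0x14 rkey	+0x18 len		+0x1C access
+ */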
+
+/**
+ * reg_phys_mr() - Registers a physical address with the Channel Adapter
+ * @pd: Protection domain associated with the physical address.
+ * @phys_addr: The physical address to be registered.
+ * @len: length of the buffer to be registered.
+ * @access: access permissions for the registered buffer.
+ * @va_reg_base: Virtual address. Currently, ERNIC supports neither
+ * Base Memory Extensions nor Zero Based VA, so this argument is
+ * ignored for now; it exists only to satisfy the verbs signature.
+ * @return: on success, returns a pointer to struct mr.
+ * else, returns a pointer to error.
+ *
+ * reg_phys_mr() validates the provided memory region and registers it
+ * with the CA through register_mem_to_ca(), which updates the mkey of
+ * the registered region.
+ */
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base)
+{
+ struct mr *phys_mr;
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ goto found;
+ }
+ /* Physical Address of the requested region is invalid */
+ return ERR_PTR(-EINVAL);
+found:
+ phys_mr = kzalloc(sizeof(*phys_mr), GFP_KERNEL);
+ if (!phys_mr)
+ return ERR_PTR(-ENOMEM);
+ phys_mr->paddr = phys_addr;
+ phys_mr->vaddr = next->vaddr;
+ phys_mr->len = len;
+ phys_mr->access = access;
+ phys_mr->pd = pd;
+
+ return register_mem_to_ca(phys_mr);
+}
+EXPORT_SYMBOL(reg_phys_mr);
+
+struct mr *query_mr(struct ernic_pd *pd)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (atomic_read(&next->pd->id) == atomic_read(&pd->id)) {
+ pr_info("Found MR\n");
+ goto ret;
+ }
+ }
+ return ERR_PTR(-EINVAL);
+ret:
+ return next;
+}
+EXPORT_SYMBOL(query_mr);
+
+/**
+ * dump_list() - prints all the regions for the specified list.
+ * @head: HEAD pointer for the list to be printed.
+ *
+ * dump_list() iterates over the specified list HEAD and
+ * prints all the physical address and length at each node in the list.
+ */
+static void dump_list(struct list_head *head)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, head, list) {
+ pr_info("MR [%d:%s] Phys_addr = %#x, vaddr = %llx, len = %d\n",
+ __LINE__, __func__,
+ next->paddr, next->vaddr, next->len);
+ }
+}
+
+/**
+ * dump_free_list() - prints all the regions in the free pool.
+ *
+ * dump_free_list() is a wrapper function for dump_list()
+ * to print free pool data
+ *
+ */
+void dump_free_list(void)
+{
+ dump_list(&mr_free);
+}
+EXPORT_SYMBOL(dump_free_list);
+
+/**
+ * dump_alloc_list() - prints all the regions in the alloc pool.
+ *
+ * dump_alloc_list() is a wrapper function for dump_list()
+ * to print alloc pool data
+ */
+void dump_alloc_list(void)
+{
+ dump_list(&mr_alloc);
+}
+EXPORT_SYMBOL(dump_alloc_list);
+
+/**
+ * init_mr() - Initialization function for memory region.
+ * @addr: Physical Address of the starting memory region.
+ * @length: Length of the region to initialize.
+ * @return: 0 on success.
+ * else, -EINVAL.
+ *
+ * init_mr() initializes a region of free memory
+ *
+ * Note: This should be called only once by the RNIC driver.
+ */
+int init_mr(phys_addr_t addr, int length)
+{
+ struct mr *reg;
+
+ /* Multiple init_mr() calls? */
+ if (free_mem_ceil > 0)
+ return -EINVAL;
+
+ /* Allocate the full struct, not just a pointer's worth of memory. */
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mr_free);
+ INIT_LIST_HEAD(&mr_alloc);
+ reg->paddr = addr;
+ reg->len = length;
+ free_pool_insert(reg);
+ free_mem_remain = reg->len;
+ free_mem_ceil = free_mem_remain;
+/* TODO: 0x2000 is the current protection domain region length for 255
+ * protection domains. The number of protection domains and the length
+ * of each should be retrieved from the DTS and used to calculate the
+ * overall remap size, instead of using a hard-coded value; currently,
+ * the length of each protection domain is not exported in the DTS.
+ */
+ mtt_va = ioremap(MTT_BASE, 0x2000);
+ return 0;
+}
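+
+/*
+ * Usage sketch (editorial addition): the RNIC driver seeds the allocator
+ * exactly once with the carve-out region; subsequent calls fail because
+ * free_mem_ceil is already non-zero:
+ *
+ *	if (init_mr(MEMORY_REGION_BASE, MEMORY_REGION_LEN))
+ *		pr_err("memory region pool already initialized\n");
+ */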
diff --git a/drivers/staging/xlnx_ernic/xmr.h b/drivers/staging/xlnx_ernic/xmr.h
new file mode 100644
index 000000000000..7c822b22eff9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+struct mr {
+ phys_addr_t paddr;
+ u64 vaddr;
+ int len;
+ unsigned int access;
+ struct ernic_pd *pd;
+ int lkey;
+ int rkey;
+ struct list_head list;
+};
+
+struct ernic_pd {
+ atomic_t id;
+};
+
+struct ernic_mtt {
+ unsigned long pd;
+#define ERNIC_PD_OFFSET 0
+ u64 iova;
+#define ERNIC_IOVA_OFFSET 4
+ u64 pa;
+#define ERNIC_PA_OFFSET 12
+ int rkey;
+#define ERNIC_RKEY_OFFSET 20
+ int len;
+#define ERNIC_LEN_OFFSET 24
+ unsigned int access;
+#define ERNIC_ACCESS_OFFSET 28
+};
+
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len);
+void free_mem(phys_addr_t paddr);
+struct mr *query_mr(struct ernic_pd *pd);
+struct ernic_pd *alloc_pd(void);
+void dealloc_pd(struct ernic_pd *pd);
+void dump_free_list(void);
+void dump_alloc_list(void);
+int init_mr(phys_addr_t addr, int len);
+int free_pool_insert(struct mr *chunk);
+void dereg_mr(struct mr *mr);
+u64 get_virt_addr(phys_addr_t phys_addr);
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base);
+int alloc_pool_remove(struct mr *chunk);
+
+extern void __iomem *mtt_va;
+/* TODO: Get the base address and length from the DTS instead of macros.
+ * Currently, the design is only for MicroBlaze, with a fixed memory
+ * layout in the design.
+ *
+ * MEMORY_REGION_BASE is a carve-out memory region which is ioremapped
+ * when required for the ERNIC configuration and Queue Pairs.
+ */
+#define MEMORY_REGION_BASE 0xC4000000
+#define MEMORY_REGION_LEN 0x3BFFFFFF
+/* TODO: Get MTT_BASE from DTS instead of Macro. */
+#define MTT_BASE 0x84000000
+#define MR_ACCESS_READ 0
+#define MR_ACCESS_WRITE 1
+#define MR_ACCESS_RDWR 2
+#define MR_ACCESS_RESVD 3
diff --git a/drivers/staging/xlnx_ernic/xperftest.h b/drivers/staging/xlnx_ernic/xperftest.h
new file mode 100644
index 000000000000..609469450a9f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xperftest.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#ifndef _PERF_TEST_H
+#define _PERF_TEST_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+struct ernic_bwtest_struct {
+ u64 reserved1;
+ int qp_number;
+ int reserved2;
+ unsigned long long rkey;
+ unsigned long long vaddr;
+ char reserved3[24];
+};
+
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf);
+void rq_handler(u32 rq_count, void *rq_context);
+void sq_handler(u32 rq_count, void *sq_context);
+void perftest_fill_wr(void __iomem *sq_ba);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _PERF_TEST_H*/
diff --git a/drivers/staging/xlnx_ernic/xqp.c b/drivers/staging/xlnx_ernic/xqp.c
new file mode 100644
index 000000000000..dae21fda5da6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+#define DISPLAY_REGS_ON_DISCONNECT
+#define EXPERIMENTAL_CODE
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC doesn't have a variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+#define cpu_to_be24(x) ((x) << 16)
+
+#define CMA_VERSION 0
+#define QP_STAT_SQ_EMPTY_BIT_POS (9)
+#define QP_STAT_OUTSTANDG_EMPTY_Q_BIT_POS (10)
+
+int in_err_wr_ptr;
+struct list_head cm_id_list;
+
+/**
+ * xrnic_set_qp_state() - Sets the qp state to the desired state
+ * @qp_num: XRNIC QP number
+ * @state: State to set
+ *
+ * @return: XRNIC_SUCCESS on success, otherwise an error code
+ */
+int xrnic_set_qp_state(int qp_num, int state)
+{
+ if (qp_num < 0)
+ return -XRNIC_INVALID_QP_ID;
+
+ if (state != XRNIC_QP_IN_USE && state != XRNIC_QP_FREE)
+ return -XRNIC_INVALID_QP_STATUS;
+
+ xrnic_dev->qp_attr[qp_num].qp_status = state;
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_find_free_qp() - Finds the free qp to use
+ * @return: free QP number, or an error value if no free QP exists
+ */
+int xrnic_find_free_qp(void)
+{
+ int i;
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ /* Look for a QP whose status is marked free */
+ if (xrnic_dev->qp_attr[i].qp_status == XRNIC_QP_FREE)
+ return i;
+ }
+ return XRNIC_FAILED;
+}
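+
+/*
+ * Editorial note: qp_attr[] is indexed from 0 while wire-visible QP
+ * numbers start at 2 (QP 1 carries the CM MAD traffic and QP 0 is not
+ * used), hence the recurring "qp_num - 2" translation: qp_attr[0] is
+ * QP 2, qp_attr[1] is QP 3, and so on.
+ */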
+
+/**
+ * xrnic_rdma_create_qp() - Allocates a free QP and configures it
+ * @cm_id: CM ID to associate with the QP
+ * @pd: Protection domain to associate the QP with
+ * @init_attr: QP attributes or config values
+ * @return: XRNIC_SUCCESS if successful, otherwise an error code
+ */
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr)
+{
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_qp_info *qp_info;
+ int ret;
+
+ if (init_attr->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ init_attr->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ init_attr->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ init_attr->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE) {
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+ }
+
+ qp_info = &cm_id->qp_info;
+
+ qp_info->qp_num = xrnic_find_free_qp();
+ qp_info->qp_num += 2;
+
+ ret = xrnic_set_qp_state((qp_info->qp_num - 2), XRNIC_QP_IN_USE);
+ if (ret < 0)
+ return ret;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+
+ if (qp_info->qp_num < 2 || qp_attr->qp_type != init_attr->qp_type)
+ return -XRNIC_INVALID_QP_ID;
+
+ cm_id->qp_type = init_attr->qp_type;
+ cm_id->local_cm_id = (qp_info->qp_num);
+
+ qp_info->xrnic_rq_event_handler = init_attr->xrnic_rq_event_handler;
+ qp_info->rq_context = init_attr->rq_context;
+ qp_info->xrnic_sq_event_handler = init_attr->xrnic_sq_event_handler;
+ qp_info->sq_context = init_attr->sq_context;
+
+ qp_info->rq_buf_ba_ca = init_attr->rq_buf_ba_ca;
+ qp_info->rq_buf_ba_ca_phys = init_attr->rq_buf_ba_ca_phys;
+ qp_info->sq_ba = init_attr->sq_ba;
+ qp_info->sq_ba_phys = init_attr->sq_ba_phys;
+ qp_info->cq_ba = init_attr->cq_ba;
+ qp_info->cq_ba_phys = init_attr->cq_ba_phys;
+
+ qp_info->sq_depth = init_attr->sq_depth;
+ qp_info->rq_depth = init_attr->rq_depth;
+ qp_info->send_sge_size = init_attr->send_sge_size;
+ qp_info->send_pkt_size = init_attr->send_pkt_size;
+ qp_info->recv_pkt_size = init_attr->recv_pkt_size;
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+#ifdef ERNIC_MEM_REGISTER
+ if (pd)
+ qp_attr->pd = atomic_read(&pd->id);
+#endif
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_qp);
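+
+/*
+ * Usage sketch (editorial addition; handler names, context pointer and
+ * buffer fields are placeholders): a consumer fills xrnic_qp_init_attr
+ * with its queue geometry and callbacks before creating the QP:
+ *
+ *	struct xrnic_qp_init_attr attr = {0};
+ *
+ *	attr.qp_type = XRNIC_QPT_UC;
+ *	attr.sq_depth = 64;
+ *	attr.rq_depth = 64;
+ *	attr.send_sge_size = 256;
+ *	attr.send_pkt_size = 512;
+ *	attr.recv_pkt_size = 512;
+ *	attr.xrnic_rq_event_handler = my_rq_handler;
+ *	attr.rq_context = my_ctx;
+ *	attr.xrnic_sq_event_handler = my_sq_handler;
+ *	attr.sq_context = my_ctx;
+ *	// rq_buf_ba_ca/sq_ba/cq_ba (and *_phys) point into carve-out memory
+ *	ret = xrnic_rdma_create_qp(cm_id, pd, &attr);
+ */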
+
+/**
+ * xrnic_post_recv() - Posts receive requests for incoming packets
+ * @qp_info: QP info on which packets should be received
+ * @rq_count: Number of packets to receive
+ * @return: XRNIC_SUCCESS if the requested number of packets can be
+ * received, otherwise an error code
+ */
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_recv_pkt(qp_attr, rq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_recv);
+
+/**
+ * xrnic_post_send() - Posts a SEND work request
+ * @qp_info: QP info to post the request on
+ * @sq_count: SEND packet count
+ * @return: XRNIC_SUCCESS if the SEND is successfully posted,
+ * otherwise an error code
+ */
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_send_pkt(qp_attr, sq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_send);
+
+/**
+ * xrnic_destroy_qp() - Destroys the QP and resets the QP info
+ * @qp_info: QP info or config
+ * @return: XRNIC_SUCCESS if the QP is successfully destroyed,
+ * otherwise an error code
+ */
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info)
+{
+ u32 qp_num;
+ struct xrnic_qp_attr *qp_attr;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num >= 2) {
+ qp_num = qp_info->qp_num;
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ xrnic_set_qp_state((qp_num - 2), XRNIC_QP_FREE);
+
+ memset((void *)qp_info, 0, sizeof(struct xrnic_qp_info));
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received invalid QP ID\n");
+ return -XRNIC_INVALID_QP_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_destroy_qp);
+
+/**
+ * xrnic_reset_io_qp() - Resets the QP configuration
+ * @qp_attr: QP memory map or config
+ */
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct xrnic_reg_map *reg_map;
+ unsigned long timeout;
+ u32 sq_pi_db_val, cq_head_val;
+ u32 rq_ci_db_val, stat_rq_pi_db_val;
+ u32 config_value;
+ int qp_num = qp_attr->qp_num - 2;
+ struct rdma_qp_attr *rdma_qp_attr;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* 1. WAIT FOR SQ/OSQ EMPTY TO BE SET */
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) & 0x3))
+ ;
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+
+ /* 3. WAIT FOR register values STAT_RQ_PI_DB == RQ_CI_DB */
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ stat_rq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ timeout = jiffies;
+ while (!(rq_ci_db_val == stat_rq_pi_db_val)) {
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+ stat_rq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->stat_rq_pi_db)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+ /* 4. SET QP_CONF register HW handshake disable to 1 */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_RQ_INTR_EN | XRNIC_QP_CONFIG_CQE_INTR_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("QP config value is 0x%x\n", config_value);
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ /* 5. SET QP_CONF register QP ENABLE TO 0 and QP_ADV_CONF register
+ * SW OVERRIDE TO 1
+ */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value & ~XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ /* 6. Initialize the QP while it is under reset: */
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_msn)));
+
+ /* 7. Initialize Ethernet-side registers: no need here, as this is
+ * done during connect initialization.
+ */
+
+ /* 8. Set QP_CONF register QP ENABLE TO 1 */
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value & ~XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* 9. Set QP_ADV_CONF register SW_OVERRIDE to 0 (disable SW
+ * override).
+ */
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_sq_cq_ptr() - This function resets SQ, CQ pointers of QP
+ * @qp_attr: QP config
+ * @hw_hs_info: QP HW handshake config
+ */
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ if (!hw_hs_info)
+ goto enable_hw_hs;
+
+ config_value = 0;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ config_value = hw_hs_info->rq_wrptr_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = hw_hs_info->sq_cmpl_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = ioread32((void *)(&rdma_qp_attr->stat_rq_pi_db));
+
+ config_value = hw_hs_info->cnct_io_conf_l_16b |
+ ((config_value & 0xFFFF) << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->cnct_io_conf)));
+enable_hw_hs:
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size);
+
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Disable SW override enable */
+
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ config_value = ioread32(((void *)
+ (&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_rq_ptr() - This function resets RQ pointers of QP
+ * @qp_attr: QP config
+ */
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Disable SW override enable */
+ config_value = 0x0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+}
+
+/**
+ * xrnic_qp_send_pkt() - This function sends packets
+ * @qp_attr: QP config
+ * @sq_pkt_count: Number of packets to send
+ * @return: XRNIC_SUCCESS if successful
+ * otherwise an error code
+ */
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, sq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ sq_pkt_count_tmp = qp_attr->sq_depth;
+ else if (qp_attr->sq_cmpl_db_local >= config_value)
+ sq_pkt_count_tmp = (config_value + qp_attr->sq_depth) -
+ qp_attr->sq_cmpl_db_local;
+ else
+ sq_pkt_count_tmp = config_value - qp_attr->sq_cmpl_db_local;
+ if (sq_pkt_count_tmp < sq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+
+ /* sq_cmpl_db_local must track the hardware's queue-specific
+ * SQ completion doorbell register; it is also needed when some
+ * packets have to be resent.
+ */
+
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local + sq_pkt_count;
+ if (qp_attr->sq_cmpl_db_local > qp_attr->sq_depth)
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local
+ - qp_attr->sq_depth;
+ config_value = qp_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ return XRNIC_SUCCESS;
+}
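+
+/*
+ * Worked example (editorial addition): with sq_depth = 64, local
+ * producer index sq_cmpl_db_local = 60 and a hardware completion
+ * pointer of 4, the free-slot count is (4 + 64) - 60 = 8, so at most 8
+ * packets may be posted. Posting all 8 advances sq_cmpl_db_local to 68,
+ * which wraps to 4 before being written to the SQ_PI_DB doorbell.
+ */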
+
+/**
+ * xrnic_qp_recv_pkt() - This function receives packets
+ * @qp_attr: QP config
+ * @rq_pkt_count: receive packet count
+ * @return: XRNIC_SUCCESS if successful
+ * otherwise an error code
+ */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, rq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ rq_pkt_count_tmp = qp_attr->rq_depth;
+ else if (qp_attr->rq_wrptr_db_local >= config_value)
+ rq_pkt_count_tmp = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count_tmp = config_value - qp_attr->rq_wrptr_db_local;
+
+ if (rq_pkt_count_tmp < rq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+ /* rq_wrptr_db_local must track the hardware's queue-specific
+ * RQ write-pointer doorbell register; it is also needed when some
+ * packets have to be resent.
+ */
+
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local + rq_pkt_count;
+ if (qp_attr->rq_wrptr_db_local > qp_attr->rq_depth)
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local
+ - qp_attr->rq_depth;
+
+ config_value = qp_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_qp1_send_mad_pkt() - This function initiates sending a management
+ * datagram packet.
+ * @send_sgl_temp: Scatter gather list
+ * @qp1_attr: QP1 info
+ * @send_pkt_size: Send packet size
+ */
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+ struct wr *sq_wr; /*sq_ba*/
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp1_attr->xrnic_mmap;
+ rdma_qp1_attr = &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+
+ /* sq_cmpl_db_local must track the hardware's queue-specific
+ * SQ completion doorbell register; it is also needed when some
+ * packets have to be resent.
+ */
+ sq_wr = (struct wr *)qp1_attr->sq_ba + qp1_attr->sq_cmpl_db_local;
+ /* All packets will be 4096 bytes; that is mandatory. */
+ sq_wr->length = send_pkt_size;
+ memcpy((void *)((char *)qp1_attr->send_sgl +
+ (qp1_attr->sq_cmpl_db_local * XRNIC_SEND_SGL_SIZE)),
+ (const void *)send_sgl_temp,
+ XRNIC_SEND_SGL_SIZE);
+ qp1_attr->sq_cmpl_db_local = qp1_attr->sq_cmpl_db_local + 1;
+
+ config_value = qp1_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_pi_db)));
+
+ if (qp1_attr->sq_cmpl_db_local == XRNIC_SQ_DEPTH)
+ qp1_attr->sq_cmpl_db_local = 0;
+}
+
+/**
+ * xrnic_qp_pkt_recv() - Processes received data packets
+ * @qp_attr: QP info on which data packet has been received
+ */
+static void xrnic_qp_pkt_recv(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp_attr->xrnic_mmap;
+ u32 config_value = 0;
+ unsigned long flag;
+ int rq_pkt_count = 0;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (qp_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+ return;
+ }
+ if (qp_attr->rq_wrptr_db_local > config_value) {
+ rq_pkt_count = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ } else {
+ rq_pkt_count = config_value - qp_attr->rq_wrptr_db_local;
+ }
+
+ cm_id->qp_info.xrnic_rq_event_handler(rq_pkt_count,
+ cm_id->qp_info.rq_context);
+
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed() - Processes completion interrupts
+ * @qp_attr: QP info for which completion is received
+ */
+static void xrnic_wqe_completed(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ unsigned long flag;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+ int qp_num = qp_attr->qp_num;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ /* sq_cmpl_db_local must track the hardware's queue-specific
+ * SQ completion doorbell register; it is also needed when some
+ * packets have to be resent.
+ */
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)&rdma_qp_attr->cq_head);
+ cm_id->qp_info.xrnic_sq_event_handler(config_value,
+ cm_id->qp_info.sq_context);
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed_intr_handler() - Interrupt handler for completion
+ * interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_wqe_completed_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long cq_intr = 0, qp_num, i, j;
+ unsigned long flag;
+
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ cq_intr = ioread32((void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+
+ if (!cq_intr)
+ continue;
+
+ for (j = find_first_bit(&cq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH;
+ j = find_next_bit(&cq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
+ iowrite32((1 << j), (void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id)
+ xrnic_wqe_completed(qp_attr);
+ else
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ WQE_COMPLETED_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_pkt_recv_intr_handler() - Interrupt handler for data
+ * packet interrupt
+ * @data: XRNIC device info
+ */
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct rdma_qp_attr *rdma_qp_attr;
+ struct xrnic_reg_map *regs;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long rq_intr = 0, qp_num, i, j, config_value;
+ unsigned long flag;
+
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ rq_intr = ioread32((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1 + (i * 4)));
+
+ if (!rq_intr)
+ continue;
+
+ for (j = find_first_bit(&rq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH; j = find_next_bit
+ (&rq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
+ /* TODO: Rework this to use work requests, since other admin
+ * QPs require wait events.
+ */
+ iowrite32((1 << j), ((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ regs = xrnic_mmap->xrnic_regs;
+ rdma_qp_attr = &regs->rdma_qp_attr[qp_num - 2];
+ config_value = ioread32((void *)
+ (&rdma_qp_attr->qp_conf));
+ if (qp_attr->cm_id &&
+ (config_value & XRNIC_QP_CONFIG_HW_HNDSHK_DIS)) {
+ xrnic_qp_pkt_recv(qp_attr);
+ } else {
+ if (qp_attr->cm_id)
+ pr_err("Received CM ID is NULL\n");
+ else
+ pr_err("HW handshake is enabled\n");
+ }
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ QP_PKT_RCVD_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_fatal_handler() - Interrupt handler for QP fatal interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_qp_fatal_handler(unsigned long data)
+{
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_conf =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ int i, err_entries;
+ unsigned long timeout;
+ unsigned long config_value, qp_num, qp, sq_pi_db_val, cq_head_val;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ err_entries = ioread32((void *)&xrnic_conf->in_errsts_q_wrptr);
+ pr_info("No of QPs in Fatal: %d\r\n", err_entries - in_err_wr_ptr);
+ for (i = 0; i < (err_entries - in_err_wr_ptr); i++) {
+ qp_num = ioread32((char *)xrnic_mmap->in_errsts_q_ba +
+ ((8 * in_err_wr_ptr) + (8 * i)));
+ qp_num = (qp_num & 0xFFFF0000) >> 16;
+ qp = qp_num - 2;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp];
+ if (rdma_qp_attr) {
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) &
+ 0x3))
+ DEBUG_LOG("Fatal wait for SQ/OSQ empty\n");
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32((void *)&rdma_qp_attr->cq_head);
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)
+ (&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ))) {
+ pr_info("SQ PI != CQ Head\n");
+ break;
+ }
+ }
+
+ /* Poll and wait for register value
+ * RESP_HNDL_STS.sq_pici_db_check_en == '1'
+ */
+ while (!((ioread32(&xrnic_conf->resp_handler_status)
+ >> 16) & 0x1))
+ DEBUG_LOG("waiting for RESP_HNDL_STS\n");
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value &
+ (~XRNIC_QP_CONFIG_QP_ENABLE);
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value |
+ XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Calling CM Handler to disconnect QP.*/
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event =
+ XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 1;
+ cm_id_info->conn_event_info.private_data_len =
+ 0;
+ cm_id_info->conn_event_info.private_data =
+ NULL;
+ qp_attr->cm_id->xrnic_cm_handler
+ (qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ in_err_wr_ptr++;
+ }
+}
+
+/**
+ * xrnic_qp1_hw_configuration() - Configures the QP1 registers
+ * @return: 0 if QP1 is successfully configured
+ */
+int xrnic_qp1_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = (struct xrnic_qp_attr *)
+ &xrnic_dev->qp1_attr;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+
+ qp1_attr->qp_num = 1;
+ rdma_qp1_attr = &xrnic_dev->xrnic_mmap.xrnic_regs->rdma_qp1_attr;
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE | xrnic_dev->pmtu |
+ XRNIC_QP1_CONFIG_RQ_BUFF_SZ |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS;
+
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->qp_conf)));
+
+ config_value = (xrnic_mmap->rq_buf_ba_ca_phys +
+ ((qp1_attr->qp_num - 1) * XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH)) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_buf_ba_ca)));
+
+ qp1_attr->rq_buf_ba_ca = xrnic_mmap->rq_buf_ba_ca +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH);
+
+ qp1_attr->rq_buf_ba_ca_phys = config_value;
+
+ config_value = xrnic_mmap->sq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_ba)));
+
+ qp1_attr->sq_ba = (struct wr *)((void *)xrnic_mmap->sq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE *
+ XRNIC_SQ_DEPTH));
+ qp1_attr->sq_ba_phys = config_value;
+
+ qp1_attr->send_sgl_phys = xrnic_mmap->send_sgl_phys +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+ qp1_attr->send_sgl = xrnic_mmap->send_sgl +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+
+ xrnic_fill_wr(qp1_attr, XRNIC_SQ_DEPTH);
+
+ config_value = xrnic_mmap->cq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH * sizeof(struct cqe));
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->cq_ba)));
+
+ qp1_attr->cq_ba = (struct cqe *)(xrnic_mmap->cq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH *
+ sizeof(struct cqe)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->sq_cmpl_db_add)));
+
+ config_value = XRNIC_SQ_DEPTH | (XRNIC_RQ_DEPTH << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->q_depth)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->timeout_conf)));
+ qp1_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp1_attr->rq_wrptr_db_local = 0;
+ qp1_attr->sq_cmpl_db_local = 0;
+ qp1_attr->rq_ci_db_local = 0;
+ qp1_attr->sq_pi_db_local = 0;
+
+ qp1_attr->resend_count = 0;
+ qp1_attr->local_cm_id = htonl(qp1_attr->qp_num);
+ qp1_attr->remote_cm_id = 0;
+
+ qp1_attr->curr_state = XRNIC_LISTEN;
+
+ qp1_attr->sqhd = 0;
+ qp1_attr->qp_type = XRNIC_QPT_UC;
+ qp1_attr->ip_addr_type = 0;
+
+ qp1_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ spin_lock_init(&qp1_attr->qp_lock);
+ return 0;
+}
+
+/**
+ * xrnic_display_qp_reg() - This function displays qp register info
+ * @qp_num: QP num for which register dump is required
+ */
+void xrnic_display_qp_reg(int qp_num)
+{
+ int i;
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+
+ for (i = 0; i < 45; i++)
+ pr_info("0x%X: 0x%08X\n",
+ (0x84020000 + (0x100 * (qp_num + 1)) + (i * 4)),
+ ioread32((void __iomem *)rdma_qp_attr + (i * 4)));
+}
+
+/**
+ * xrnic_qp_timer() - QP timer expiry handler for CM retransmissions
+ * @data: Timer handle embedded in the QP attribute info
+ */
+void xrnic_qp_timer(struct timer_list *data)
+{
+ struct xrnic_qp_attr *qp_attr = from_timer(qp_attr, data, qp_timer);
+ struct xrnic_qp_attr *qp1_attr = qp_attr->qp1_attr;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ unsigned long flag;
+ int qp1_send_pkt_size;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ if (qp_attr->curr_state == XRNIC_REJ_SENT) {
+ DEBUG_LOG("REJ SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0,
+ XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ }
+ } else if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ DEBUG_LOG("REP SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_REP;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ DEBUG_LOG("MRA Received\n");
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_TIMEOUT;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ DEBUG_LOG("Disconnect Req Sent\n");
+ if (qp_attr->resend_count < XRNIC_DREQ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_DREP_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ DEBUG_LOG("In time wait state\n");
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+#ifdef DISPLAY_REGS_ON_DISCONNECT
+ xrnic_display_qp_reg(qp_attr->qp_num);
+#endif
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->qp_timer.expires = 0;
+ }
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
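+/*
+ * All of the retransmit arms above rearm the timer with the same backoff:
+ * jiffies + usecs_to_jiffies(XRNIC_CM_TIMEOUT * (1 << XRNIC_CM_TIMER_TIMEOUT)).
+ * A minimal sketch of a helper that could deduplicate this (hypothetical,
+ * not part of the driver):
+ *
+ *	static unsigned long xrnic_cm_expiry(void)
+ *	{
+ *		return jiffies + usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ *						  (1 << XRNIC_CM_TIMER_TIMEOUT));
+ *	}
+ */
+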
+/**
+ * xrnic_qp_app_configuration() - This function programs the QP registers
+ * @qp_num: QP num to configure
+ * @hw_qp_status: whether to enable or disable the HW QP
+ */
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ u32 config_value = 0;
+ int recv_pkt_size = qp_attr->recv_pkt_size;
+
+ /* Host number will directly map to the local cm id. */
+ if (hw_qp_status == XRNIC_HW_QP_ENABLE) {
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ } else if (hw_qp_status == XRNIC_HW_QP_DISABLE) {
+ /* Clearing the whole configuration disables the HW QP */
+ config_value = 0;
+ } else {
+ DEBUG_LOG("Invalid HW QP status\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = qp_attr->sq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_ba)));
+
+ config_value = qp_attr->cq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_ba)));
+
+ config_value = qp_attr->sq_depth | (qp_attr->rq_depth << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->q_depth)));
+
+ config_value = (qp_attr->starting_psn |
+ (IB_OPCODE_RC_SEND_ONLY << 24));
+ iowrite32(config_value, (void *)&rdma_qp_attr->last_rq_req);
+
+ config_value = be32_to_cpu(qp_attr->ipv4_addr);
+ iowrite32(config_value, (void *)&rdma_qp_attr->ip_dest_addr1);
+ config_value = ((qp_attr->mac_addr[2] << 24) |
+ (qp_attr->mac_addr[3] << 16) |
+ (qp_attr->mac_addr[4] << 8) |
+ qp_attr->mac_addr[5]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_lsb);
+
+ config_value = ((qp_attr->mac_addr[0] << 8) | qp_attr->mac_addr[1]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_msb);
+
+ config_value = qp_attr->remote_qp;
+ iowrite32(config_value, (void *)&rdma_qp_attr->dest_qp_conf);
+
+ iowrite32(qp_attr->rem_starting_psn, (void *)&rdma_qp_attr->sq_psn);
+#ifdef ERNIC_MEM_REGISTER
+ if (qp_attr->pd)
+ iowrite32(qp_attr->pd, ((void *)(&rdma_qp_attr->pd)));
+#endif
+}
+
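+/*
+ * Worked example of the destination MAC packing above: for a (made-up)
+ * address aa:bb:cc:dd:ee:ff, mac_addr[0..5] = {0xaa, 0xbb, 0xcc, 0xdd,
+ * 0xee, 0xff}, so the registers end up as:
+ *
+ *	mac_dest_addr_lsb = (0xcc << 24) | (0xdd << 16) | (0xee << 8) | 0xff
+ *			  = 0xccddeeff
+ *	mac_dest_addr_msb = (0xaa << 8) | 0xbb = 0x0000aabb
+ */
+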
+/**
+ * xrnic_qp_hw_configuration() - This function configures QP registers
+ * @qp_num: QP num
+ */
+void xrnic_qp_hw_configuration(int qp_num)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ u32 config_value = 0;
+
+ /* qp_num starts from 0 while data QPs start from 2 */
+ qp_attr->qp_num = qp_num + 2;
+
+ config_value = XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS |
+ XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE |
+ XRNIC_QP_ADV_CONFIG_PARTITION_KEY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_adv_conf)));
+
+ /* DDR addresses for the RQ and SQ doorbells */
+
+ config_value = xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1)))
+ & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->timeout_conf)));
+ qp_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->cm_id = NULL;
+ qp_attr->resend_count = 0;
+ qp_attr->local_cm_id = qp_attr->qp_num;
+ qp_attr->remote_cm_id = 0;
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ qp_attr->sqhd = 0;
+ qp_attr->qp_type = XRNIC_QPT_RC;
+ qp_attr->ip_addr_type = 0;
+
+ qp_attr->curr_state = XRNIC_LISTEN;
+
+ qp_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ /* The QP starts out in the XRNIC_LISTEN state; set up its CM timer */
+ timer_setup(&qp_attr->qp_timer, xrnic_qp_timer, 0);
+
+ spin_lock_init(&qp_attr->qp_lock);
+}
+
+#ifdef EXPERIMENTAL_CODE
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 1
+#define XRNIC_RQ_BUF_NODE 1
+#define XRNIC_SQ_BA_NODE 1
+#define XRNIC_TX_HDR_BUF_NODE 1
+#define XRNIC_TX_SGL_BUF_NODE 1
+#define XRNIC_BYPASS_BUF_NODE 1
+#define XRNIC_ERRPKT_BUF_NODE 1
+#define XRNIC_OUTERR_STS_NODE 1
+
+#define XRNIC_RQWR_PTR_NODE 1
+#define XRNIC_SQ_CMPL_NODE 2
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 3
+#else /* ! EXPERIMENTAL_CODE */
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 2
+#define XRNIC_RQ_BUF_NODE 3
+#define XRNIC_SQ_BA_NODE 4
+#define XRNIC_TX_HDR_BUF_NODE 5
+#define XRNIC_TX_SGL_BUF_NODE 6
+#define XRNIC_BYPASS_BUF_NODE 7
+#define XRNIC_ERRPKT_BUF_NODE 8
+#define XRNIC_OUTERR_STS_NODE 9
+#define XRNIC_INERR_STS_NODE 10
+#define XRNIC_RQWR_PTR_NODE 11
+#define XRNIC_SQ_CMPL_NODE 12
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 13
+#define XRNIC_DATA_BUF_BA_NODE 14
+#define XRNIC_RESP_ERR_PKT_BUF_BA 15
+#endif /* EXPERIMENTAL_CODE */
diff --git a/drivers/staging/xlnx_ernic/xqp.h b/drivers/staging/xlnx_ernic/xqp.h
new file mode 100644
index 000000000000..442932f66daf
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_QP_H
+#define _XRNIC_QP_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/interrupt.h>
+enum qp_type {
+ XRNIC_NOT_ALLOCATED = 1,
+ XRNIC_DISC_CTRL_QP = 2,
+ XRNIC_NVMEOF_CTRL_QP = 3,
+ XRNIC_NVMEOF_IO_QP = 4,
+};
+
+enum ernic_qp_status {
+ XRNIC_QP_FREE,
+ XRNIC_QP_IN_USE,
+};
+
+struct xrnic_qp_attr {
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr;
+ struct xrnic_rdma_cm_id *cm_id;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ u32 remote_qpn;
+ u32 qp_status;
+ u32 starting_psn;
+ u32 rem_starting_psn;
+ u8 send_sgl_temp[XRNIC_QP1_SEND_PKT_SIZE];
+ u32 resend_count;
+ u32 rq_wrptr_db_local;
+ u32 sq_cmpl_db_local;
+ u32 rq_ci_db_local;
+ u32 sq_pi_db_local;
+ u16 ip_addr_type; /* DESTINATION ADDR_FAMILY */
+ u32 ipv4_addr; /* DESTINATION IP addr */
+ u8 ipv6_addr[16];
+ u8 mac_addr[6];
+ u32 source_qp_num;
+ /* remote_qp is the remote QPN used in active CM; source_qp_num is
+ * the source queue pair carried in the DETH
+ */
+ u32 remote_qp;
+ enum xrnic_rdma_cm_event_type curr_state;
+ /* DISC or NVMECTRL. Maps the host ID directly to a
+ * particular host_no.
+ */
+ enum xrnic_qp_type qp_type;
+ u16 sqhd;
+ /* Direct mapping of the host ID to access a particular host_no. */
+ u16 nvmeof_cntlid;
+ u32 nvmeof_qp_id;
+ struct timer_list qp_timer;
+ struct tasklet_struct qp_task;
+ /* kernel locking primitive */
+ spinlock_t qp_lock;
+ char irq_name[32];
+ u32 irq_vect;
+ u32 pd;
+};
+
+enum xrnic_hw_qp_status {
+ XRNIC_HW_QP_ENABLE,
+ XRNIC_HW_QP_DISABLE,
+};
+
+void xrnic_display_qp_reg(int qp_num);
+void xrnic_qp_fatal_handler(unsigned long data);
+void xrnic_qp_timer(struct timer_list *data);
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data);
+void xrnic_qp_task_handler(unsigned long data);
+void xrnic_wqe_completed_intr_handler(unsigned long data);
+
+/* QP Specific function templates */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count);
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count);
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr);
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+void xrnic_qp_hw_configuration(int qp_num);
+int xrnic_qp1_hw_configuration(void);
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status);
+int xrnic_find_free_qp(void);
+int xrnic_set_qp_state(int qp_num, int state);
+
+#ifdef __cplusplus
+ }
+#endif
+#endif /* _XRNIC_QP_H */
diff --git a/drivers/staging/xlnx_ernic/xrocev2.h b/drivers/staging/xlnx_ernic/xrocev2.h
new file mode 100644
index 000000000000..fec90081d094
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xrocev2.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_ROCEV2_H
+#define _XRNIC_ROCEV2_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <rdma/ib_pack.h>
+
+#define XRNIC_REQ_QPN 0x1
+#define XRNIC_RESPONDER_RESOURCES 0x10
+#define XRNIC_INITIATOR_DEPTH 0x10
+#define XRNIC_REQ_LOCAL_CM_RESP_TOUT 0x11
+#define XRNIC_REQ_REMOTE_CM_RESP_TOUT 0x14
+#define XRNIC_REQ_PATH_PKT_PAYLOAD_MTU 92
+#define XRNIC_REQ_RETRY_COUNT 0x7
+#define XRNIC_REQ_RDC_EXISTS 1
+#define XRNIC_REQ_SRQ 0
+
+#define XRNIC_REJ_INFO_LEN 0
+
+#define XRNIC_MRA_SERVICE_TIMEOUT 0x11
+
+#define XRNIC_REP_END_END_FLOW_CONTROL 0x0
+#define XRNIC_REP_FAIL_OVER_ACCEPTED 0x3
+#define XRNIC_REP_TARGET_ACK_DELAY 0x1F
+#define XRNIC_REP_RNR_RETRY_COUNT 0x7
+
+#define XRNIC_CM_TIMEOUT 0x4
+#define XRNIC_CM_TIMER_TIMEOUT 0x11
+
+enum xrnic_wc_opcod {
+ XRNIC_RDMA_WRITE = 0x0,
+ XRNIC_SEND_ONLY = 0x2,
+ XRNIC_RDMA_READ = 0x4
+};
+
+enum xrnic_msg_rej {
+ XRNIC_REJ_REQ = 0x0,
+ XRNIC_REJ_REP = 0x1,
+ XRNIC_REJ_OTHERS = 0x2,
+};
+
+enum xrnic_msg_mra {
+ XRNIC_MRA_REQ = 0x0,
+ XRNIC_MRA_REP = 0x1,
+ XRNIC_MRA_LAP = 0x2,
+};
+
+enum xrnic_rej_reason {
+ XRNIC_REJ_NO_QP_AVAILABLE = 1,
+ XRNIC_REJ_NO_EE_AVAILABLE = 2,
+ XRNIC_REJ_NO_RESOURCE_AVAILABLE = 3,
+ XRNIC_REJ_TIMEOUT = 4,
+ XRNIC_REJ_UNSUPPORTED_REQ = 5,
+ XRNIC_REJ_INVALID_CM_ID = 6,
+ XRNIC_REJ_INVALID_QPN = 7,
+ XRNIC_REJ_RDC_NOT_EXIST = 11,
+ XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST = 13,
+ XRNIC_REJ_INVALID_MTU = 26,
+ XRNIC_REJ_INSUFFICIENT_RESP_RESOURCE = 27,
+ XRNIC_REJ_CONSUMER_REJECT = 28,
+ XRNIC_REJ_DUPLICATE_LOCAL_CM_ID = 30,
+ XRNIC_REJ_UNSUPPORTED_CLASS_VERSION = 31,
+};
+
+/* MAD common status field */
+struct mad_comm_status {
+ __u8 busy:1;
+ __u8 redir_reqd:1;
+ __u8 invalid_field_code:3;
+ __u8 reserved:3;
+ __u8 class_specific;
+} __packed;
+
+#define XRNIC_MAD_BASE_VER 1
+#define XRNIC_MAD_MGMT_CLASS 0x07
+#define XRNIC_MAD_RESP_BIT 0x0
+#define XRNIC_MAD_COMM_SEND 0x3
+#define XRNIC_MAD_RESERVED 0x0
+
+/* Management Datagram (MAD), 256 bytes */
+struct mad {
+ __u8 base_ver;
+ __u8 mgmt_class;
+ __u8 class_version;
+ __u8 resp_bit_method;
+ struct mad_comm_status status;// 2 bytes
+ __be16 class_specific;
+ __be64 transaction_id;
+ __be16 attribute_id;
+ __be16 reserved;
+ __be32 attrb_modifier;
+ __be32 data[58];
+} __packed;
+
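+/*
+ * Illustrative sketch (not part of the driver) of how the XRNIC_MAD_*
+ * values above map onto the header when a CM MAD is built:
+ *
+ *	struct mad m = { 0 };
+ *
+ *	m.base_ver = XRNIC_MAD_BASE_VER;	 // 1
+ *	m.mgmt_class = XRNIC_MAD_MGMT_CLASS;	 // 0x07 (CM class)
+ *	m.class_version = XRNIC_MAD_BASE_VER;	 // assumption: CM class v1
+ *	m.resp_bit_method = XRNIC_MAD_COMM_SEND; // 0x3
+ */
+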
+struct req {
+ __u32 local_cm_id;
+ __u32 reserved1;
+ __u8 service_id[8];
+ __u8 local_ca_guid[8];
+ __u32 reserved2;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 responder_resources:8;
+ __u32 local_eecn:24;
+ __u32 initiator_depth:8;
+ __u32 remote_eecn:24;
+
+ __u32 remote_cm_resp_tout:5;
+ __u32 transport_svc_type:2;
+ __u32 e2e_flow_control:1;
+ __u8 start_psn[3];
+ __u8 local_cm_resp_tout:5;
+ __u8 retry_count: 3;
+ __u16 p_key;
+ __u8 path_packet_payload_mtu:4;
+ __u8 rdc_exists:1;
+ __u8 rnr_retry_count:3;
+ __u8 max_cm_retries:4;
+ __u8 srq:1;
+ __u8 reserved3:3;
+ __u16 primary_local_port_lid;
+ __u16 primary_remote_port_lid;
+ __u64 primary_local_port_gid[2];
+ __u64 primary_remote_port_gid[2];
+ __u32 primary_flow_label:20;
+ __u32 reserved4:6;
+ __u32 primary_packet_rate:6;
+ __u32 primary_traffic_class:8;
+ __u32 primary_hop_limit:8;
+ __u32 primary_sl:4;
+ __u32 primary_subnet_local:1;
+ __u32 reserved5:3;
+ __u32 primary_local_ack_tout:5;
+ __u32 reserved6:3;
+ __u32 alternate_local_port_lid:16;
+ __u32 alternate_remote_port_lid:16;
+ __u64 alternate_local_port_gid[2];
+ __u64 alternate_remote_port_gid[2];
+ __u32 alternate_flow_labe:20;
+ __u32 reserved7:6;
+ __u32 alternate_packet_rate:6;
+ __u32 alternate_traffic_class:8;
+ __u32 alternate_hop_limit:8;
+ __u32 alternate_sl:4;
+ __u32 alternate_subnet_local:1;
+ __u32 reserved8:3;
+ __u32 alternate_local_ack_timeout: 5;
+ __u32 reserved9:3;
+ __u8 private_data[92];
+} __packed;
+
+/* MRA Message contents */
+/* Message Receipt Acknowledgement */
+struct mra {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_mraed:2;
+ __u8 reserved1:6;
+ __u8 service_timeout:5;
+ __u8 reserved2:3;
+ __u8 private_data[222];
+} __packed;
+
+/* REJ Message contents */
+struct rej {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_rejected:2;
+ __u8 reserved1:6;
+ __u8 reject_info_length:7;
+ __u8 reserved2:1;
+ __u16 reason;
+ __u8 additional_reject_info[72];
+ __u8 private_data[148];
+} __packed;
+
+/* REP Message contents */
+struct rep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 reserved1:8;
+ __u32 local_ee_context:24;
+ __u32 reserved2:8;
+ __u8 start_psn[3];
+ __u8 reserved3;
+ __u8 responder_resources;
+ __u8 initiator_depth;
+ union {
+ __u8 target_fail_end;
+ __u8 target_ack_delay:5;
+ __u8 fail_over_accepted:2;
+ };
+ __u8 end_end_flow_control:1;
+ __u8 rnr_retry_count:3;
+ __u8 sqr:1;
+ __u8 reserved4:4;
+ __u8 local_ca_guid[8];
+ __u8 private_data[196];
+} __packed;
+
+/* RTU indicates that the connection is established and that the
+ * recipient may begin transmitting.
+ */
+struct rtu {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[224];
+} __packed;
+
+#define XRNIC_SEND_UD 0x64
+#define XRNIC_SET_SOLICT_EVENT 0x0
+#define XRNIC_RESET_SOLICT_EVENT 0x0
+#define XRNIC_MIGRATION_REQ 0x0
+#define XRNIC_PAD_COUNT 0x0
+#define XRNIC_TRANSPORT_HDR_VER 0x0
+#define XRNIC_DESTINATION_QP 0x1
+#define XRNIC_RESERVED1 0x0
+#define XRNIC_ACK_REQ 0x0
+#define XRNIC_RESERVED2 0x0
+
+struct bth {
+ __u8 opcode;
+ __u8 solicited_event:1;
+ __u8 migration_req:1;
+ __u8 pad_count:2;
+ __u8 transport_hdr_ver:4;
+ __be16 partition_key;
+ __u8 reserved1;
+ __u8 destination_qp[3];
+ __u32 ack_request:1;
+ __u32 reserved2:7;
+ __u32 pkt_seq_num:24;
+} __packed;
+
+#define XRNIC_DETH_RESERVED 0
+struct deth {
+ __be32 q_key;
+ __u8 reserved;
+ __be32 src_qp:24;
+} __packed;
+
+/* DREQ - request for communication release */
+struct dreq {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 remote_qpn_eecn:24;
+ __u32 reserved:8;
+ __u8 private_data[220];
+} __packed;
+
+/* DREP - reply to request for communication release */
+struct drep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[228];
+} __packed;
+
+/* LAP - load alternate path */
+struct lap {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 reserved1;
+ __u32 remote_QPN_EECN:24;
+ __u32 remote_cm_response_timeout:5;
+ __u32 reserved2:3;
+ __u32 reserved3;
+ __u32 alt_local_port_id:16;
+ __u32 alt_remote_port_id:16;
+ __u64 alt_local_port_gid[2];
+ __u64 alt_remote_port_gid[2];
+ __u32 alt_flow_label:20;
+ __u32 reserved4:4;
+ __u32 alt_traffic_class:8;
+ __u32 alt_hope_limit:8;
+ __u32 reserved5:2;
+ __u32 alt_pkt_rate:6;
+ __u32 alt_sl:4;
+ __u32 alt_subnet_local:1;
+ __u32 reserved6:3;
+ __u32 alt_local_ack_timeout:5;
+ __u32 reserved7:3;
+ __u8 private_data[168];
+} __packed;
+
+/* APR - alternate path response */
+struct apr {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 additional_info_length;
+ __u8 ap_status;
+ __u8 reserved1[2];
+ __u8 additional_info[72];
+ __u8 private_data[148];
+} __packed;
+
+enum cm_establishment_states {
+ CLASS_PORT_INFO = 0x1,
+ CONNECT_REQUEST = 0x10, /* Request for connection */
+ MSG_RSP_ACK = 0x11, /* Message Response Ack */
+ CONNECT_REJECT = 0x12, /* Connect Reject */
+ CONNECT_REPLY = 0x13, /* Reply for request communication */
+ READY_TO_USE = 0x14, /* Ready to use */
+ DISCONNECT_REQUEST = 0x15, /* Receive Disconnect req */
+ DISCONNECT_REPLY = 0x16, /* Send Disconnect reply */
+ SERVICE_ID_RESOLUTION_REQ = 0x17,
+ SERVICE_ID_RESOLUTION_REQ_REPLY = 0x18,
+ LOAD_ALTERNATE_PATH = 0x19,
+ ALTERNATE_PATH_RESPONSE = 0x1a,
+};
+
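+/*
+ * These enum values are the CM MAD attribute IDs: they are what ends up
+ * in the attribute_id field of struct mad for each connection-management
+ * message (an interpretation based on the InfiniBand CM attribute ID
+ * space, which this enum mirrors).
+ */
+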
+#define XRNIC_ETH_ALEN 6
+#define XRNIC_ETH_P_IP 0x0800
+#define XRNIC_ETH_P_ARP 0x0806
+#define XRNIC_ETH_HLEN 14
+#define XRNIC_ICRC_SIZE 4
+
+//Ethernet header
+struct ethhdr_t {
+ unsigned char h_dest[XRNIC_ETH_ALEN];
+ unsigned char h_source[XRNIC_ETH_ALEN];
+ __be16 eth_type; /*< packet type ID field */
+} __packed;
+
+struct ipv4hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4, /*< Version */
+ ihl:4; /*< Internet Header Length */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos; /*< Type of service */
+ __be16 total_length; /*< Total length */
+ __be16 id; /*< Identification */
+ u16 frag_off; /*< Fragment offset */
+ __u8 time_to_live; /*< Time to live */
+ __u8 protocol; /*< Protocol */
+ __be16 hdr_chksum; /*< Header checksum */
+ __be32 src_addr; /*< Source address */
+ __be32 dest_addr; /*< Destination address */
+} __packed;
+
+struct qp_cm_pkt {
+ struct ethhdr_t eth; //14 Byte
+ union {
+ struct ipv4hdr ipv4; //20 bytes
+ struct ipv4hdr ipv6; //20 bytes
+ } ip;
+ struct udphdr udp; //8 Byte
+ struct bth bth; //12 Bytes
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/*
+ * RoCEv2 packet for receiver. Duplicated for ease of code readability.
+ */
+struct qp_cm_pkt_hdr_ipv4 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv4hdr ipv4;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+struct qp_cm_pkt_hdr_ipv6 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv6hdr ipv6;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/* MAD Packet validation defines */
+#define MAD_BASIC_VER 1
+#define OPCODE_SEND_UD 0x64
+
+#define MAD_SUBNET_CLASS 0x1
+#define MAD_DIRECT_SUBNET_CLASS 0x81
+
+#define MAD_SEND_CM_MSG 0x03
+#define MAD_VERF_FAILED -1
+#define MAD_VERF_SUCCESS 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_ROCEV2_H*/
diff --git a/drivers/staging/xlnxsync/Kconfig b/drivers/staging/xlnxsync/Kconfig
new file mode 100644
index 000000000000..08e73384dc94
--- /dev/null
+++ b/drivers/staging/xlnxsync/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_SYNC
+ tristate "Xilinx Synchronizer"
+ depends on ARCH_ZYNQMP
+ help
+ This driver is developed for the Xilinx Synchronizer IP. It is used
+ to monitor the AXI addresses of the producer and trigger the
+ consumer to start earlier, thereby reducing the latency of
+ processing the data.
+
+ To compile this driver as a module, choose M here.
+ If unsure, choose N.
diff --git a/drivers/staging/xlnxsync/MAINTAINERS b/drivers/staging/xlnxsync/MAINTAINERS
new file mode 100644
index 000000000000..e2d720419783
--- /dev/null
+++ b/drivers/staging/xlnxsync/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX SYNCHRONIZER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnxsync
diff --git a/drivers/staging/xlnxsync/Makefile b/drivers/staging/xlnxsync/Makefile
new file mode 100644
index 000000000000..b126a36da37c
--- /dev/null
+++ b/drivers/staging/xlnxsync/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync.o
diff --git a/drivers/staging/xlnxsync/dt-binding.txt b/drivers/staging/xlnxsync/dt-binding.txt
new file mode 100644
index 000000000000..1d22f38cf237
--- /dev/null
+++ b/drivers/staging/xlnxsync/dt-binding.txt
@@ -0,0 +1,34 @@
+Xilinx Synchronizer
+-------------------
+
+The Xilinx Synchronizer is used for buffer synchronization between
+producer and consumer blocks. It does so by tapping onto the bus where the
+producer block writes frame data to memory and the consumer block reads the
+frame data from memory.
+
+It can work on the encode path with a maximum of 4 channels or on the decode
+path with a maximum of 2 channels.
+
+Required properties:
+- compatible : Must contain "xlnx,sync-1.0"
+- reg: Physical base address and length of the registers set for the device.
+- interrupts: Contains the interrupt line number.
+- interrupt-parent: phandle to interrupt controller.
+- clock-names: The input clock names for the AXI4-Lite, producer and consumer
+  clocks.
+- clocks: References to the clocks that drive the AXI4-Lite interface,
+  producer and consumer.
+- xlnx,num-chan: Range from 1 to 2 for decode.
+ Range from 1 to 4 for encode.
+
+Optional properties:
+- xlnx,encode: Present if the IP is configured for the encode path, else absent.
+
+Example:
+
+v_sync_vcu: subframe_sync_vcu@a00e0000 {
+ compatible = "xlnx,sync-1.0";
+ reg = <0x0 0xa00e0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 96 4>;
+ clock-names = "s_axi_ctrl", "s_axi_mm_p", "s_axi_mm_c";
+ clocks = <&vid_s_axi_clk>, <&vid_stream_clk>, <&vid_stream_clk>;
+ xlnx,num-chan = <4>;
+ xlnx,encode;
+};
diff --git a/drivers/staging/xlnxsync/xlnxsync.c b/drivers/staging/xlnxsync/xlnxsync.c
new file mode 100644
index 000000000000..563ba3f0545e
--- /dev/null
+++ b/drivers/staging/xlnxsync/xlnxsync.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Synchronizer IP driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver is used to control the Xilinx Synchronizer IP
+ * to achieve sub frame latency for encode and decode with VCU.
+ * This is done by monitoring the address lines for specific values.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/xlnxsync.h>
+
+/* Register offsets and bit masks */
+#define XLNXSYNC_CTRL_REG 0x00
+#define XLNXSYNC_ISR_REG 0x04
+#define XLNXSYNC_L_START_LO_REG 0x08
+#define XLNXSYNC_L_START_HI_REG 0x0C
+#define XLNXSYNC_C_START_LO_REG 0x20
+#define XLNXSYNC_C_START_HI_REG 0x24
+#define XLNXSYNC_L_END_LO_REG 0x38
+#define XLNXSYNC_L_END_HI_REG 0x3C
+#define XLNXSYNC_C_END_LO_REG 0x50
+#define XLNXSYNC_C_END_HI_REG 0x54
+#define XLNXSYNC_L_MARGIN_REG 0x68
+#define XLNXSYNC_C_MARGIN_REG 0x74
+#define XLNXSYNC_IER_REG 0x80
+#define XLNXSYNC_DBG_REG 0x84
+
+#define XLNXSYNC_CTRL_ENCDEC_MASK BIT(0)
+#define XLNXSYNC_CTRL_ENABLE_MASK BIT(1)
+#define XLNXSYNC_CTRL_INTR_EN_MASK BIT(2)
+
+#define XLNXSYNC_ISR_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_ISR_WDG_ERR_MASK BIT(1)
+#define XLNXSYNC_ISR_LDONE_SHIFT (2)
+#define XLNXSYNC_ISR_LDONE_MASK GENMASK(3, 2)
+#define XLNXSYNC_ISR_LSKIP_MASK BIT(4)
+#define XLNXSYNC_ISR_LVALID_MASK BIT(5)
+#define XLNXSYNC_ISR_CDONE_SHIFT (6)
+#define XLNXSYNC_ISR_CDONE_MASK GENMASK(7, 6)
+#define XLNXSYNC_ISR_CSKIP_MASK BIT(8)
+#define XLNXSYNC_ISR_CVALID_MASK BIT(9)
+
+/* bit 44 of start address */
+#define XLNXSYNC_FB_VALID_MASK BIT(12)
+#define XLNXSYNC_FB_HI_ADDR_MASK GENMASK(11, 0)
+
+#define XLNXSYNC_IER_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_IER_WDG_ERR_MASK BIT(1)
+#define XLNXSYNC_IER_LVALID_MASK BIT(5)
+#define XLNXSYNC_IER_CVALID_MASK BIT(9)
+
+#define XLNXSYNC_IER_ALL_MASK (XLNXSYNC_IER_SYNC_FAIL_MASK |\
+ XLNXSYNC_IER_WDG_ERR_MASK |\
+ XLNXSYNC_IER_LVALID_MASK |\
+ XLNXSYNC_IER_CVALID_MASK)
+
+/* Other macros */
+#define XLNXSYNC_CHAN_OFFSET 0x100
+
+#define XLNXSYNC_DEVNAME_LEN (32)
+
+#define XLNXSYNC_DRIVER_NAME "xlnxsync"
+
+#define XLNXSYNC_DEV_MAX 256
+
+/* Used to keep track of sync devices */
+static DEFINE_IDA(xs_ida);
+
+/**
+ * struct xlnxsync_device - Xilinx Synchronizer struct
+ * @miscdev: Miscellaneous device struct
+ * @config: IP config struct
+ * @dev: Pointer to device
+ * @iomem: Pointer to the register space
+ * @irq: IRQ number
+ * @irq_lock: Spinlock used to protect access to sync and watchdog error
+ * @wait_event: wait queue for error events
+ * @sync_err: Capture synchronization error per channel
+ * @wdg_err: Capture watchdog error per channel
+ * @l_done: Luma done result array
+ * @c_done: Chroma done result array
+ * @axi_clk: Pointer to clock structure for axilite clock
+ * @p_clk: Pointer to clock structure for producer clock
+ * @c_clk: Pointer to clock structure for consumer clock
+ * @minor: device id count
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xlnxsync_device {
+ struct miscdevice miscdev;
+ struct xlnxsync_config config;
+ struct device *dev;
+ void __iomem *iomem;
+ int irq;
+ /* irq_lock is used to protect access to sync_err and wdg_err */
+ spinlock_t irq_lock;
+ wait_queue_head_t wait_event;
+ bool sync_err[XLNXSYNC_MAX_ENC_CHANNEL];
+ bool wdg_err[XLNXSYNC_MAX_ENC_CHANNEL];
+ bool l_done[XLNXSYNC_MAX_ENC_CHANNEL][XLNXSYNC_BUF_PER_CHANNEL];
+ bool c_done[XLNXSYNC_MAX_ENC_CHANNEL][XLNXSYNC_BUF_PER_CHANNEL];
+ struct clk *axi_clk;
+ struct clk *p_clk;
+ struct clk *c_clk;
+ int minor;
+};
+
+static inline struct xlnxsync_device *to_xlnxsync_device(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct xlnxsync_device, miscdev);
+}
+
+static inline u32 xlnxsync_read(struct xlnxsync_device *dev, u32 chan, u32 reg)
+{
+ return ioread32(dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+static inline void xlnxsync_write(struct xlnxsync_device *dev, u32 chan,
+ u32 reg, u32 val)
+{
+ iowrite32(val, dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+static inline void xlnxsync_clr(struct xlnxsync_device *dev, u32 chan, u32 reg,
+ u32 clr)
+{
+ xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) & ~clr);
+}
+
+static inline void xlnxsync_set(struct xlnxsync_device *dev, u32 chan, u32 reg,
+ u32 set)
+{
+ xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) | set);
+}
+
+static bool xlnxsync_is_buf_done(struct xlnxsync_device *dev,
+ u32 channel, u32 buf)
+{
+ u32 luma_valid, chroma_valid;
+
+ luma_valid = xlnxsync_read(dev, channel,
+ XLNXSYNC_L_START_HI_REG + (buf << 3)) &
+ XLNXSYNC_FB_VALID_MASK;
+ chroma_valid = xlnxsync_read(dev, channel,
+ XLNXSYNC_C_START_HI_REG + (buf << 3)) &
+ XLNXSYNC_FB_VALID_MASK;
+ if (!luma_valid && !chroma_valid)
+ return true;
+
+ return false;
+}
+
+static void xlnxsync_reset_chan(struct xlnxsync_device *dev, u32 chan)
+{
+ u32 i;
+
+ xlnxsync_write(dev, chan, XLNXSYNC_CTRL_REG, 0);
+ xlnxsync_write(dev, chan, XLNXSYNC_IER_REG, 0);
+ for (i = 0; i < XLNXSYNC_BUF_PER_CHANNEL; i++) {
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_L_START_LO_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_L_START_HI_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_C_START_LO_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_C_START_HI_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_L_END_LO_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_L_END_HI_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_C_END_LO_REG + (i << 3), 0);
+ xlnxsync_write(dev, chan,
+ XLNXSYNC_C_END_HI_REG + (i << 3), 0);
+ }
+ xlnxsync_write(dev, chan, XLNXSYNC_L_MARGIN_REG, 0);
+ xlnxsync_write(dev, chan, XLNXSYNC_C_MARGIN_REG, 0);
+}
+
+static int xlnxsync_config_channel(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_chan_config cfg;
+ int ret, i = 0;
+
+ ret = copy_from_user(&cfg, arg, sizeof(cfg));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return ret;
+ }
+
+ if (cfg.channel_id >= dev->config.max_channels &&
+ cfg.channel_id != XLNXSYNC_AUTO_SEARCH) {
+ dev_err(dev->dev, "%s : Incorrect channel id %d\n",
+ __func__, cfg.channel_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "Channel id = %d, FB id = %d IsMono = %d\n",
+ cfg.channel_id, cfg.fb_id, cfg.ismono);
+ dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+ cfg.luma_start_address, cfg.luma_end_address, cfg.luma_margin);
+ dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+ cfg.chroma_start_address, cfg.chroma_end_address,
+ cfg.chroma_margin);
+
+ if (cfg.channel_id == XLNXSYNC_AUTO_SEARCH) {
+ ret = -EBUSY;
+ for (i = 0; i < dev->config.max_channels; i++) {
+ u32 val;
+
+ val = xlnxsync_read(dev, i, XLNXSYNC_CTRL_REG);
+ if (!(val & XLNXSYNC_CTRL_ENABLE_MASK)) {
+ cfg.channel_id = i;
+ ret = 0;
+ dev_dbg(dev->dev,
+ "Channel id auto assigned = %d\n", i);
+ break;
+ }
+ }
+
+ if (ret) {
+ dev_dbg(dev->dev, "Unable to find free channel\n");
+ return ret;
+ }
+ }
+
+ if (cfg.fb_id == XLNXSYNC_AUTO_SEARCH) {
+ /* When fb_id is XLNXSYNC_AUTO_SEARCH, find a free fb in the channel */
+ dev_dbg(dev->dev, "%s : auto search free fb\n", __func__);
+ for (i = 0; i < XLNXSYNC_BUF_PER_CHANNEL; i++) {
+ if (xlnxsync_is_buf_done(dev, cfg.channel_id, i))
+ break;
+ dev_dbg(dev->dev, "Channel %d FB %d is busy\n",
+ cfg.channel_id, i);
+ }
+
+ if (i == XLNXSYNC_BUF_PER_CHANNEL)
+ return -EBUSY;
+
+ } else if (cfg.fb_id < XLNXSYNC_BUF_PER_CHANNEL) {
+ /* If fb_id is specified, check its availability */
+ if (!(xlnxsync_is_buf_done(dev, cfg.channel_id, cfg.fb_id))) {
+ dev_dbg(dev->dev,
+ "%s : FB %d in channel %d is busy!\n",
+ __func__, cfg.fb_id, cfg.channel_id);
+ return -EBUSY;
+ }
+ i = cfg.fb_id;
+ dev_dbg(dev->dev, "%s : Configure fb %d\n", __func__, i);
+ } else {
+ /* Invalid fb_id passed */
+ dev_err(dev->dev, "Invalid FB id %d for configuration!\n",
+ cfg.fb_id);
+ return -EINVAL;
+ }
+
+ /* Start Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_START_LO_REG + (i << 3),
+ lower_32_bits(cfg.luma_start_address));
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_START_HI_REG + (i << 3),
+ upper_32_bits(cfg.luma_start_address) &
+ XLNXSYNC_FB_HI_ADDR_MASK);
+
+ /* End Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_END_LO_REG + (i << 3),
+ lower_32_bits(cfg.luma_end_address));
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_END_HI_REG + (i << 3),
+ upper_32_bits(cfg.luma_end_address));
+
+ /* Set margin */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_MARGIN_REG + (i << 2),
+ cfg.luma_margin);
+
+ if (!cfg.ismono) {
+ dev_dbg(dev->dev, "%s : Not monochrome. Program Chroma\n",
+ __func__);
+ /* Chroma Start Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_START_LO_REG + (i << 3),
+ lower_32_bits(cfg.chroma_start_address));
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_START_HI_REG + (i << 3),
+ upper_32_bits(cfg.chroma_start_address) &
+ XLNXSYNC_FB_HI_ADDR_MASK);
+ /* Chroma End Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_END_LO_REG + (i << 3),
+ lower_32_bits(cfg.chroma_end_address));
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_END_HI_REG + (i << 3),
+ upper_32_bits(cfg.chroma_end_address));
+ /* Chroma Margin */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_MARGIN_REG + (i << 2),
+ cfg.chroma_margin);
+ /* Set the Valid bit */
+ xlnxsync_set(dev, cfg.channel_id,
+ XLNXSYNC_C_START_HI_REG + (i << 3),
+ XLNXSYNC_FB_VALID_MASK);
+ }
+
+ /* Set the Valid bit */
+ xlnxsync_set(dev, cfg.channel_id,
+ XLNXSYNC_L_START_HI_REG + (i << 3),
+ XLNXSYNC_FB_VALID_MASK);
+
+ return 0;
+}
+
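+/*
+ * Note on the register strides used above: each buffer has a LO/HI register
+ * pair, so the per-buffer start/end registers are strided by 8 bytes
+ * (i << 3), while the per-buffer margin registers are single 32-bit
+ * registers strided by 4 bytes (i << 2). For example, for buffer i = 2:
+ *
+ *	XLNXSYNC_L_START_LO_REG + (2 << 3) = 0x08 + 0x10 = 0x18
+ *	XLNXSYNC_L_MARGIN_REG + (2 << 2)   = 0x68 + 0x08 = 0x70
+ */
+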
+static int xlnxsync_get_channel_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ int ret;
+ u32 mask = 0, i, j;
+ unsigned long flags;
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ /* Update Buffers status */
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHANNEL; j++) {
+ if (xlnxsync_is_buf_done(dev, i, j)) {
+ mask |= 1 << ((i << XLNXSYNC_BUF_PER_CHANNEL)
+ + j);
+ }
+ }
+
+ /* Update channel enable status */
+ if (xlnxsync_read(dev, i, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_ENABLE_MASK)
+ mask |= XLNXSYNC_CHX_ENB_MASK(i);
+
+ /* Update channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ if (dev->sync_err[i])
+ mask |= XLNXSYNC_CHX_SYNC_ERR_MASK(i);
+
+ if (dev->wdg_err[i])
+ mask |= XLNXSYNC_CHX_WDG_ERR_MASK(i);
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ }
+
+ ret = copy_to_user(arg, &mask, sizeof(mask));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ return ret;
+ }
+ dev_dbg(dev->dev, "%s - Channel status = 0x%08x\n", __func__, mask);
+ return ret;
+}
+
+static int xlnxsync_enable(struct xlnxsync_device *dev, u32 channel,
+ bool enable)
+{
+ /* Check the channel against the maximum from the DT */
+ if (channel >= dev->config.max_channels) {
+ dev_err(dev->dev, "Invalid channel %d. Max channels = %d!\n",
+ channel, dev->config.max_channels);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ dev_dbg(dev->dev, "Enabling %d channel\n", channel);
+ xlnxsync_set(dev, channel, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ xlnxsync_set(dev, channel, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ } else {
+ dev_dbg(dev->dev, "Disabling %d channel\n", channel);
+ xlnxsync_clr(dev, channel, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ xlnxsync_clr(dev, channel, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ }
+
+ return 0;
+}
+
+static int xlnxsync_get_config(struct xlnxsync_device *dev, void __user *arg)
+{
+ struct xlnxsync_config cfg;
+ int ret;
+
+ cfg.encode = dev->config.encode;
+ cfg.max_channels = dev->config.max_channels;
+
+ dev_dbg(dev->dev, "IP Config : encode = %d max_channels = %d\n",
+ cfg.encode, cfg.max_channels);
+ ret = copy_to_user(arg, &cfg, sizeof(cfg));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_clr_chan_err(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_clr_err errcfg;
+ int ret;
+ unsigned long flags;
+
+ ret = copy_from_user(&errcfg, arg, sizeof(errcfg));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return ret;
+ }
+
+ if (errcfg.channel_id >= dev->config.max_channels) {
+ dev_err(dev->dev, "%s : Incorrect channel id %d\n",
+ __func__, errcfg.channel_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "%s : Clearing %d channel errors\n",
+ __func__, errcfg.channel_id);
+ /* Clear channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ if (dev->sync_err[errcfg.channel_id])
+ dev->sync_err[errcfg.channel_id] = false;
+
+ if (dev->wdg_err[errcfg.channel_id])
+ dev->wdg_err[errcfg.channel_id] = false;
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return 0;
+}
+
+static int xlnxsync_get_fbdone_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_fbdone fbdone_stat;
+ int ret, i, j;
+
+ /* Zero the status snapshot so stale stack data never reaches userspace */
+ memset(&fbdone_stat, 0, sizeof(fbdone_stat));
+
+ for (i = 0; i < dev->config.max_channels; i++)
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHANNEL; j++)
+ if (dev->l_done[i][j] && dev->c_done[i][j])
+ fbdone_stat.status[i][j] = true;
+
+ ret = copy_to_user(arg, &fbdone_stat, sizeof(fbdone_stat));
+ if (ret)
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+
+ return ret;
+}
+
+static int xlnxsync_clr_fbdone_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_fbdone fbd;
+ int ret, i, j;
+ unsigned long flags;
+
+ ret = copy_from_user(&fbd, arg, sizeof(fbd));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return ret;
+ }
+
+ /* Clear the framebuffer done status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHANNEL; j++) {
+ if (fbd.status[i][j]) {
+ dev->l_done[i][j] = false;
+ dev->c_done[i][j] = false;
+ fbd.status[i][j] = false;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return 0;
+}
+
+static long xlnxsync_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ int ret = -EINVAL;
+ u32 channel = data;
+ void __user *arg = (void __user *)data;
+ struct xlnxsync_device *xlnxsync_dev = to_xlnxsync_device(fptr);
+
+ dev_dbg(xlnxsync_dev->dev, "ioctl = 0x%08x\n", cmd);
+
+ switch (cmd) {
+ case XLNXSYNC_GET_CFG:
+ ret = xlnxsync_get_config(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_GET_CHAN_STATUS:
+ ret = xlnxsync_get_channel_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_SET_CHAN_CONFIG:
+ ret = xlnxsync_config_channel(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_CHAN_ENABLE:
+ ret = xlnxsync_enable(xlnxsync_dev, channel, true);
+ break;
+ case XLNXSYNC_CHAN_DISABLE:
+ ret = xlnxsync_enable(xlnxsync_dev, channel, false);
+ if (ret < 0)
+ return ret;
+ xlnxsync_reset_chan(xlnxsync_dev, channel);
+ break;
+ case XLNXSYNC_CLR_CHAN_ERR:
+ ret = xlnxsync_clr_chan_err(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_GET_CHAN_FBDONE_STAT:
+ ret = xlnxsync_get_fbdone_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_CLR_CHAN_FBDONE_STAT:
+ ret = xlnxsync_clr_fbdone_status(xlnxsync_dev, arg);
+ break;
+ }
+
+ return ret;
+}
+
+static __poll_t xlnxsync_poll(struct file *fptr, poll_table *wait)
+{
+ u32 i, j;
+ bool err_event, framedone_event;
+ __poll_t ret = 0;
+ unsigned long flags;
+ struct xlnxsync_device *dev = to_xlnxsync_device(fptr);
+
+ ret = poll_requested_events(wait);
+
+ dev_dbg_ratelimited(dev->dev, "%s : entered req_events = 0x%x!\n",
+ __func__, ret);
+
+ if (!(ret & (POLLPRI | POLLIN)))
+ return 0;
+
+ poll_wait(fptr, &dev->wait_event, wait);
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ err_event = false;
+ for (i = 0; i < dev->config.max_channels && !err_event; i++) {
+ if (dev->sync_err[i] || dev->wdg_err[i])
+ err_event = true;
+ }
+
+ framedone_event = false;
+ for (i = 0; i < dev->config.max_channels && !framedone_event; i++) {
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHANNEL; j++) {
+ if (dev->l_done[i][j] && dev->c_done[i][j])
+ framedone_event = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ if (err_event) {
+ dev_dbg_ratelimited(dev->dev, "%s : error event occurred!\n",
+ __func__);
+ ret |= POLLPRI;
+ }
+
+ if (framedone_event) {
+ dev_dbg_ratelimited(dev->dev, "%s : framedone event occurred!\n",
+ __func__);
+ ret |= POLLIN;
+ }
+
+ return ret;
+}
+
+static const struct file_operations xlnxsync_fops = {
+ .unlocked_ioctl = xlnxsync_ioctl,
+ .poll = xlnxsync_poll,
+};
+
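+/*
+ * Userspace usage sketch (illustrative only; assumes the uapi header
+ * <linux/xlnxsync.h> and the misc device node created at probe time):
+ *
+ *	int fd = open("/dev/xlnxsync0", O_RDWR);
+ *	struct xlnxsync_config cfg;
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
+ *
+ *	ioctl(fd, XLNXSYNC_GET_CFG, &cfg);
+ *	// cfg.encode / cfg.max_channels describe this IP instance
+ *
+ *	poll(&pfd, 1, -1);	// POLLPRI: channel error, POLLIN: frame done
+ */
+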
+static irqreturn_t xlnxsync_irq_handler(int irq, void *data)
+{
+ struct xlnxsync_device *xlnxsync = (struct xlnxsync_device *)data;
+ u32 val, i;
+ bool err_event;
+ bool framedone_event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xlnxsync->irq_lock, flags);
+ err_event = false;
+ framedone_event = false;
+ for (i = 0; i < xlnxsync->config.max_channels; i++) {
+ u32 j, buf_index;
+
+ val = xlnxsync_read(xlnxsync, i, XLNXSYNC_ISR_REG);
+ xlnxsync_write(xlnxsync, i, XLNXSYNC_ISR_REG, val);
+
+ if (val & XLNXSYNC_ISR_SYNC_FAIL_MASK)
+ xlnxsync->sync_err[i] = true;
+ if (val & XLNXSYNC_ISR_WDG_ERR_MASK)
+ xlnxsync->wdg_err[i] = true;
+ if (xlnxsync->sync_err[i] || xlnxsync->wdg_err[i])
+ err_event = true;
+
+ if (val & XLNXSYNC_ISR_LDONE_MASK) {
+ buf_index = (val & XLNXSYNC_ISR_LDONE_MASK) >>
+ XLNXSYNC_ISR_LDONE_SHIFT;
+
+ xlnxsync->l_done[i][buf_index] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_CDONE_MASK) {
+ buf_index = (val & XLNXSYNC_ISR_CDONE_MASK) >>
+ XLNXSYNC_ISR_CDONE_SHIFT;
+
+ xlnxsync->c_done[i][buf_index] = true;
+ }
+
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHANNEL; j++)
+ if (xlnxsync->l_done[i][j] && xlnxsync->c_done[i][j])
+ framedone_event = true;
+ }
+ spin_unlock_irqrestore(&xlnxsync->irq_lock, flags);
+
+ if (err_event || framedone_event) {
+ dev_dbg_ratelimited(xlnxsync->dev, "%s : error occurred\n",
+ __func__);
+ wake_up_interruptible(&xlnxsync->wait_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xlnxsync_parse_dt_prop(struct xlnxsync_device *xlnxsync)
+{
+ struct device_node *node = xlnxsync->dev->of_node;
+ int ret;
+
+ xlnxsync->config.encode = of_property_read_bool(node, "xlnx,encode");
+ dev_dbg(xlnxsync->dev, "synchronizer type = %s\n",
+ xlnxsync->config.encode ? "encode" : "decode");
+
+ ret = of_property_read_u32(node, "xlnx,num-chan",
+ (u32 *)&xlnxsync->config.max_channels);
+ if (ret)
+ return ret;
+
+ dev_dbg(xlnxsync->dev, "max channels = %d\n",
+ xlnxsync->config.max_channels);
+
+ if (xlnxsync->config.max_channels == 0 ||
+ xlnxsync->config.max_channels > XLNXSYNC_MAX_ENC_CHANNEL) {
+ dev_err(xlnxsync->dev, "Number of channels should be 1 to 4.\n");
+ dev_err(xlnxsync->dev, "Invalid number of channels : %d\n",
+ xlnxsync->config.max_channels);
+ return -EINVAL;
+ }
+
+ if (!xlnxsync->config.encode &&
+ xlnxsync->config.max_channels > XLNXSYNC_MAX_DEC_CHANNEL) {
+ dev_err(xlnxsync->dev, "Decode can't have more than 2 channels.\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int xlnxsync_clk_setup(struct xlnxsync_device *xlnxsync)
+{
+ int ret;
+
+ xlnxsync->axi_clk = devm_clk_get(xlnxsync->dev, "s_axi_ctrl");
+ if (IS_ERR(xlnxsync->axi_clk)) {
+ ret = PTR_ERR(xlnxsync->axi_clk);
+ dev_err(xlnxsync->dev, "failed to get axi_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ xlnxsync->p_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_p");
+ if (IS_ERR(xlnxsync->p_clk)) {
+ ret = PTR_ERR(xlnxsync->p_clk);
+ dev_err(xlnxsync->dev, "failed to get p_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ xlnxsync->c_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_c");
+ if (IS_ERR(xlnxsync->c_clk)) {
+ ret = PTR_ERR(xlnxsync->c_clk);
+ dev_err(xlnxsync->dev, "failed to get c_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->axi_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable axi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->p_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable p_clk (%d)\n", ret);
+ goto err_pclk;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->c_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable c_clk (%d)\n", ret);
+ goto err_cclk;
+ }
+
+ return ret;
+
+err_cclk:
+ clk_disable_unprepare(xlnxsync->p_clk);
+err_pclk:
+ clk_disable_unprepare(xlnxsync->axi_clk);
+
+ return ret;
+}
+
+static int xlnxsync_probe(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct resource *res;
+ int ret;
+
+ xlnxsync = devm_kzalloc(&pdev->dev, sizeof(*xlnxsync), GFP_KERNEL);
+ if (!xlnxsync)
+ return -ENOMEM;
+
+ xlnxsync->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource.\n");
+ return -ENODEV;
+ }
+
+ xlnxsync->iomem = devm_ioremap_nocache(xlnxsync->dev, res->start,
+ resource_size(res));
+ if (!xlnxsync->iomem) {
+ dev_err(&pdev->dev, "ip register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = xlnxsync_parse_dt_prop(xlnxsync);
+ if (ret < 0)
+ return ret;
+
+ xlnxsync->irq = irq_of_parse_and_map(xlnxsync->dev->of_node, 0);
+ if (!xlnxsync->irq) {
+ dev_err(xlnxsync->dev, "Unable to parse and get irq.\n");
+ return -EINVAL;
+ }
+ ret = devm_request_threaded_irq(xlnxsync->dev, xlnxsync->irq, NULL,
+ xlnxsync_irq_handler, IRQF_ONESHOT,
+ dev_name(xlnxsync->dev), xlnxsync);
+
+ if (ret) {
+ dev_err(xlnxsync->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ ret = xlnxsync_clk_setup(xlnxsync);
+ if (ret) {
+ dev_err(xlnxsync->dev, "clock setup failed!\n");
+ return ret;
+ }
+
+ init_waitqueue_head(&xlnxsync->wait_event);
+ spin_lock_init(&xlnxsync->irq_lock);
+
+ xlnxsync->miscdev.minor = MISC_DYNAMIC_MINOR;
+ xlnxsync->miscdev.name = devm_kzalloc(&pdev->dev, XLNXSYNC_DEVNAME_LEN,
+ GFP_KERNEL);
+ if (!xlnxsync->miscdev.name) {
+ ret = -ENOMEM;
+ goto clk_err;
+ }
+
+ xlnxsync->minor = ida_simple_get(&xs_ida, 0, XLNXSYNC_DEV_MAX,
+ GFP_KERNEL);
+ if (xlnxsync->minor < 0) {
+ ret = xlnxsync->minor;
+ goto clk_err;
+ }
+
+ snprintf((char *)xlnxsync->miscdev.name, XLNXSYNC_DEVNAME_LEN, "%s%d",
+ "xlnxsync", xlnxsync->minor);
+ xlnxsync->miscdev.fops = &xlnxsync_fops;
+ ret = misc_register(&xlnxsync->miscdev);
+ if (ret < 0) {
+ dev_err(xlnxsync->dev, "driver registration failed!\n");
+ goto ida_err;
+ }
+
+ platform_set_drvdata(pdev, xlnxsync);
+
+ dev_info(xlnxsync->dev, "Xilinx Synchronizer probe successful!\n");
+
+ return 0;
+
+ida_err:
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+clk_err:
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+
+ return ret;
+}
+
+static int xlnxsync_remove(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync = platform_get_drvdata(pdev);
+
+ misc_deregister(&xlnxsync->miscdev);
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnxsync_of_match[] = {
+ /* TODO : Change as per dt */
+ { .compatible = "xlnx,sync-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xlnxsync_of_match);
+
+static struct platform_driver xlnxsync_driver = {
+ .driver = {
+ .name = XLNXSYNC_DRIVER_NAME,
+ .of_match_table = xlnxsync_of_match,
+ },
+ .probe = xlnxsync_probe,
+ .remove = xlnxsync_remove,
+};
+
+module_platform_driver(xlnxsync_driver);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx Synchronizer IP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xroeframer/Kconfig b/drivers/staging/xroeframer/Kconfig
new file mode 100644
index 000000000000..16aa1f2c6a78
--- /dev/null
+++ b/drivers/staging/xroeframer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Xilinx Radio over Ethernet Framer driver
+#
+
+config XROE_FRAMER
+ tristate "Xilinx Radio over Ethernet Framer driver"
+ help
+ The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates
+ Ethernet packet data, (de-)multiplexes packets based on protocol
+ into/from various Radio Antenna data streams.
+
+ It has two main, independent data paths:
+
+ - Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+ we call this the De-Framer path, or defm on all related IP signals.
+
+ - Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+ we call this the Framer path, or fram on all related IP signals.
diff --git a/drivers/staging/xroeframer/Makefile b/drivers/staging/xroeframer/Makefile
new file mode 100644
index 000000000000..f7bf07e98243
--- /dev/null
+++ b/drivers/staging/xroeframer/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Framer driver
+#
+obj-$(CONFIG_XROE_FRAMER) := framer.o
+
+framer-objs := xroe_framer.o \
+ sysfs_xroe.o \
+ sysfs_xroe_framer_ipv4.o \
+ sysfs_xroe_framer_ipv6.o \
+ sysfs_xroe_framer_udp.o \
+ sysfs_xroe_framer_stats.o
diff --git a/drivers/staging/xroeframer/README b/drivers/staging/xroeframer/README
new file mode 100644
index 000000000000..505a46c2cf62
--- /dev/null
+++ b/drivers/staging/xroeframer/README
@@ -0,0 +1,47 @@
+Xilinx Radio over Ethernet Framer driver
+=========================================
+
+About the RoE Framer
+
+The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates Ethernet
+packet data, (de-)multiplexes packets based on protocol into/from various
+Radio Antenna data streams.
+
+It has two main, independent data paths:
+
+- Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+we call this the De-Framer path, or defm on all related IP signals.
+
+- Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+we call this the Framer path, or fram on all related IP signals.
+
+Key points:
+
+- Apart from the AXI4-Lite configuration port and a handful of strobe/control
+signals, all data interfaces are AXI4-Stream (AXIS).
+- The IP does not contain an Ethernet MAC; rather, it routes or creates
+packets based on the direction through the roe_framer.
+- Currently designed to work with
+ - 1, 2 or 4 10G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+ or 32 antenna ports
+ Note: each Ethernet port is 64 bit data @ 156.25MHz
+ - 1 or 2 25G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+ or 32 antenna ports
+ Note: each Ethernet port is 64 bit data @ 390.25MHz
+- Contains a filter so that all non-protocol packets, or packets not
+processed by the hardware IP, can be forwarded to another block for
+processing. In general this is a microprocessor, specifically the Zynq ARM
+in our case. This filter function can move into the optional switch when TSN
+is used.
+
+About the Linux Driver
+
+The RoE Framer Linux driver provides sysfs access to the framer controls. The
+driver is bound to the hardware using a Device Tree binding (see
+"dt-binding.txt" for more information). When the driver is loaded, the
+general controls (such as framing mode, enable, restart, etc.) are exposed
+under /sys/kernel/xroe. Furthermore, specific controls can be found under
+/sys/kernel/xroe/framer. These include protocol-specific settings for
+IPv4, IPv6 and UDP.
+
+The framer's register map can also be accessed directly, for both reading
+and writing (where permitted), using ioctl calls.
diff --git a/drivers/staging/xroeframer/roe_framer_ctrl.h b/drivers/staging/xroeframer/roe_framer_ctrl.h
new file mode 100644
index 000000000000..162c49a9bc3b
--- /dev/null
+++ b/drivers/staging/xroeframer/roe_framer_ctrl.h
@@ -0,0 +1,1088 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_FRAMER_V1_0_CFG_BASE_ADDR 0x0 /* 0 */
+#define ROE_FRAMER_V1_0_FRAM_BASE_ADDR 0x2000 /* 8192 */
+#define ROE_FRAMER_V1_0_FRAM_DRP_BASE_ADDR 0x4000 /* 16384 */
+#define ROE_FRAMER_V1_0_DEFM_BASE_ADDR 0x6000 /* 24576 */
+#define ROE_FRAMER_V1_0_DEFM_DRP_BASE_ADDR 0x8000 /* 32768 */
+#define ROE_FRAMER_V1_0_ETH_BASE_ADDR 0xa000 /* 40960 */
+#define ROE_FRAMER_V1_0_STATS_BASE_ADDR 0xc000 /* 49152 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_cfg
+ * with prefix cfg_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define CFG_MAJOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MAJOR_REVISION_MASK 0xff000000 /* 4278190080 */
+#define CFG_MAJOR_REVISION_OFFSET 0x18 /* 24 */
+#define CFG_MAJOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MAJOR_REVISION_DEFAULT 0x1 /* 1 */
+
+/* Type = roInt */
+#define CFG_MINOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MINOR_REVISION_MASK 0xff0000 /* 16711680 */
+#define CFG_MINOR_REVISION_OFFSET 0x10 /* 16 */
+#define CFG_MINOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MINOR_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_VERSION_REVISION_ADDR 0x0 /* 0 */
+#define CFG_VERSION_REVISION_MASK 0xff00 /* 65280 */
+#define CFG_VERSION_REVISION_OFFSET 0x8 /* 8 */
+#define CFG_VERSION_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_VERSION_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_INTERNAL_REVISION_ADDR 0x4 /* 4 */
+#define CFG_INTERNAL_REVISION_MASK 0xffffffff /* 4294967295 */
+#define CFG_INTERNAL_REVISION_OFFSET 0x0 /* 0 */
+#define CFG_INTERNAL_REVISION_WIDTH 0x20 /* 32 */
+#define CFG_INTERNAL_REVISION_DEFAULT 0x12345678 /* 305419896 */
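+
+/*
+ * Access sketch (editorial note, not part of the generated map): each field
+ * below is a bit-slice of the 32-bit register word at its _ADDR, so a read is
+ *
+ *	u32 reg = ioread32(base + CFG_MAJOR_REVISION_ADDR);
+ *	u32 major = (reg & CFG_MAJOR_REVISION_MASK) >>
+ *		    CFG_MAJOR_REVISION_OFFSET;
+ *
+ * and a write is a read-modify-write using the same _MASK/_OFFSET pair.
+ */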
+
+/* Type = rw */
+#define CFG_TIMEOUT_VALUE_ADDR 0x8 /* 8 */
+#define CFG_TIMEOUT_VALUE_MASK 0xfff /* 4095 */
+#define CFG_TIMEOUT_VALUE_OFFSET 0x0 /* 0 */
+#define CFG_TIMEOUT_VALUE_WIDTH 0xc /* 12 */
+#define CFG_TIMEOUT_VALUE_DEFAULT 0x80 /* 128 */
+
+/* Type = rw */
+#define CFG_USER_RW_OUT_ADDR 0xc /* 12 */
+#define CFG_USER_RW_OUT_MASK 0xff /* 255 */
+#define CFG_USER_RW_OUT_OFFSET 0x0 /* 0 */
+#define CFG_USER_RW_OUT_WIDTH 0x8 /* 8 */
+#define CFG_USER_RW_OUT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_USER_RO_IN_ADDR 0xc /* 12 */
+#define CFG_USER_RO_IN_MASK 0xff0000 /* 16711680 */
+#define CFG_USER_RO_IN_OFFSET 0x10 /* 16 */
+#define CFG_USER_RO_IN_WIDTH 0x8 /* 8 */
+#define CFG_USER_RO_IN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_MASTER_INT_ENABLE_ADDR 0x10 /* 16 */
+#define CFG_MASTER_INT_ENABLE_MASK 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_MASTER_INT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_OF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_OF_ENABLE_MASK 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_FIFO_OF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_UF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_UF_ENABLE_MASK 0x2 /* 2 */
+#define CFG_FRAM_FIFO_UF_ENABLE_OFFSET 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_AXI_TIMEOUT_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_AXI_TIMEOUT_ENABLE_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_ENABLE_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_ENABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define CFG_INTERRUPT_STATUS_SAMPLE_ADDR 0x1c /* 28 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_MASK 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_OFFSET 0x0 /* 0 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_WIDTH 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define CFG_FRAM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_FRAM_RESET_STATUS_MASK 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_DEFM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_DEFM_RESET_STATUS_MASK 0x2 /* 2 */
+#define CFG_DEFM_RESET_STATUS_OFFSET 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_MASK 0x100 /* 256 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_OFFSET 0x8 /* 8 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_MASK 0x200 /* 512 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_OFFSET 0x9 /* 9 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_MASK 0x400 /* 1024 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_OFFSET 0xa /* 10 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_MASK 0x800 /* 2048 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_OFFSET 0xb /* 11 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_AXI_TIMEOUT_STATUS_ADDR 0x18 /* 24 */
+#define CFG_AXI_TIMEOUT_STATUS_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_STATUS_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_MASK 0xffff /* 65535 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_MASK 0xffff0000 /* 4294901760 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_MASK 0x3ff /* 1023 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_ETH_SPEED_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_ETH_SPEED_MASK 0x3ff0000 /* 67043328 */
+#define CFG_CONFIG_ETH_SPEED_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_ETH_SPEED_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_ETH_SPEED_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram
+ * with prefix fram_ @ address 0x2000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_DISABLE_ADDR 0x2000 /* 8192 */
+#define FRAM_DISABLE_MASK 0x1 /* 1 */
+#define FRAM_DISABLE_OFFSET 0x0 /* 0 */
+#define FRAM_DISABLE_WIDTH 0x1 /* 1 */
+#define FRAM_DISABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define FRAM_READY_ADDR 0x2000 /* 8192 */
+#define FRAM_READY_MASK 0x2 /* 2 */
+#define FRAM_READY_OFFSET 0x1 /* 1 */
+#define FRAM_READY_WIDTH 0x1 /* 1 */
+#define FRAM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define FRAM_FIFO_FULL_INDICATOR_ADDR 0x2004 /* 8196 */
+#define FRAM_FIFO_FULL_INDICATOR_MASK 0xffffffff /* 4294967295 */
+#define FRAM_FIFO_FULL_INDICATOR_OFFSET 0x0 /* 0 */
+#define FRAM_FIFO_FULL_INDICATOR_WIDTH 0x20 /* 32 */
+#define FRAM_FIFO_FULL_INDICATOR_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MIN_ADDR 0x2020 /* 8224 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MAX_ADDR 0x2024 /* 8228 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_ADDR 0x2028 /* 8232 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x202c /* 8236 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_ADDR 0x2030 /* 8240 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_ADDR 0x2034 /* 8244 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_ADDR 0x2038 /* 8248 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x203c /* 8252 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_ADDR 0x2050 /* 8272 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_ADDR 0x2054 /* 8276 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_ADDR 0x2058 /* 8280 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x205c /* 8284 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x2060 /* 8288 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x2064 /* 8292 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_ADDR 0x2068 /* 8296 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x206c /* 8300 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_PROTOCOL_DEFINITION_ADDR 0x2200 /* 8704 */
+#define FRAM_PROTOCOL_DEFINITION_MASK 0xf /* 15 */
+#define FRAM_PROTOCOL_DEFINITION_OFFSET 0x0 /* 0 */
+#define FRAM_PROTOCOL_DEFINITION_WIDTH 0x4 /* 4 */
+#define FRAM_PROTOCOL_DEFINITION_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_GEN_VLAN_TAG_ADDR 0x2200 /* 8704 */
+#define FRAM_GEN_VLAN_TAG_MASK 0x10 /* 16 */
+#define FRAM_GEN_VLAN_TAG_OFFSET 0x4 /* 4 */
+#define FRAM_GEN_VLAN_TAG_WIDTH 0x1 /* 1 */
+#define FRAM_GEN_VLAN_TAG_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_ADDR 0x2200 /* 8704 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_MASK 0x60 /* 96 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_OFFSET 0x5 /* 5 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_WIDTH 0x2 /* 2 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram_drp
+ * with prefix fram_drp @ address 0x4000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_PC_ID_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_PC_ID_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm
+ * with prefix defm_ @ address 0x6000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_RESTART_ADDR 0x6000 /* 24576 */
+#define DEFM_RESTART_MASK 0x1 /* 1 */
+#define DEFM_RESTART_OFFSET 0x0 /* 0 */
+#define DEFM_RESTART_WIDTH 0x1 /* 1 */
+#define DEFM_RESTART_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_READY_ADDR 0x6000 /* 24576 */
+#define DEFM_READY_MASK 0x2 /* 2 */
+#define DEFM_READY_OFFSET 0x1 /* 1 */
+#define DEFM_READY_WIDTH 0x1 /* 1 */
+#define DEFM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_ERR_PACKET_FILTER_ADDR 0x6004 /* 24580 */
+#define DEFM_ERR_PACKET_FILTER_MASK 0x3 /* 3 */
+#define DEFM_ERR_PACKET_FILTER_OFFSET 0x0 /* 0 */
+#define DEFM_ERR_PACKET_FILTER_WIDTH 0x2 /* 2 */
+#define DEFM_ERR_PACKET_FILTER_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_ADDR 0x6008 /* 24584 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_ADDR 0x600c /* 24588 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MIN_ADDR 0x6020 /* 24608 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MAX_ADDR 0x6024 /* 24612 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x602c /* 24620 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_ADDR 0x6030 /* 24624 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_ADDR 0x6034 /* 24628 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x603c /* 24636 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_ADDR 0x6050 /* 24656 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_ADDR 0x6054 /* 24660 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x605c /* 24668 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x6060 /* 24672 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x6064 /* 24676 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x606c /* 24684 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_31_0_ADDR 0x6100 /* 24832 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_63_32_ADDR 0x6104 /* 24836 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_95_64_ADDR 0x6108 /* 24840 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_127_96_ADDR 0x610c /* 24844 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_DEFAULT 0xfffffeae /* 4294966958 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_MASK_ADDR 0x6110 /* 24848 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_DEFAULT 0xcfff /* 53247 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_31_0_ADDR 0x6120 /* 24864 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_63_32_ADDR 0x6124 /* 24868 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_95_64_ADDR 0x6128 /* 24872 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_127_96_ADDR 0x612c /* 24876 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_MASK_ADDR 0x6130 /* 24880 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_31_0_ADDR 0x6140 /* 24896 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_63_32_ADDR 0x6144 /* 24900 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_95_64_ADDR 0x6148 /* 24904 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_127_96_ADDR 0x614c /* 24908 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_MASK_ADDR 0x6150 /* 24912 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_31_0_ADDR 0x6160 /* 24928 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_63_32_ADDR 0x6164 /* 24932 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_95_64_ADDR 0x6168 /* 24936 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_127_96_ADDR 0x616c /* 24940 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_MASK_ADDR 0x6170 /* 24944 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_DEFAULT 0xffff /* 65535 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm_drp
+ * with prefix defm_drp @ address 0x8000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_DRPDEFM_DATA_PC_ID_ADDR 0x8000 /* 32768 */
+#define DEFM_DRPDEFM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DRPDEFM_CTRL_PC_ID_ADDR 0x8400 /* 33792 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_eth
+ * with prefix eth_ @ address 0xa000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define ETH_DEST_ADDR_31_0_ADDR 0xa000 /* 40960 */
+#define ETH_DEST_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_DEST_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_DEST_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_DEST_ADDR_47_32_ADDR 0xa004 /* 40964 */
+#define ETH_DEST_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_DEST_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_DEST_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_SRC_ADDR_31_0_ADDR 0xa008 /* 40968 */
+#define ETH_SRC_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_SRC_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_SRC_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_SRC_ADDR_47_32_ADDR 0xa00c /* 40972 */
+#define ETH_SRC_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_SRC_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_SRC_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_ID_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_ID_MASK 0xfff /* 4095 */
+#define ETH_VLAN_ID_OFFSET 0x0 /* 0 */
+#define ETH_VLAN_ID_WIDTH 0xc /* 12 */
+#define ETH_VLAN_ID_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_DEI_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_DEI_MASK 0x1000 /* 4096 */
+#define ETH_VLAN_DEI_OFFSET 0xc /* 12 */
+#define ETH_VLAN_DEI_WIDTH 0x1 /* 1 */
+#define ETH_VLAN_DEI_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_PCP_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_PCP_MASK 0xe000 /* 57344 */
+#define ETH_VLAN_PCP_OFFSET 0xd /* 13 */
+#define ETH_VLAN_PCP_WIDTH 0x3 /* 3 */
+#define ETH_VLAN_PCP_DEFAULT 0x7 /* 7 */
+
+/* Type = rw */
+#define ETH_IPV4_VERSION_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_VERSION_MASK 0xf /* 15 */
+#define ETH_IPV4_VERSION_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_VERSION_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_VERSION_DEFAULT 0x4 /* 4 */
+
+/* Type = rw */
+#define ETH_IPV4_IHL_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_IHL_MASK 0xf0 /* 240 */
+#define ETH_IPV4_IHL_OFFSET 0x4 /* 4 */
+#define ETH_IPV4_IHL_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_IHL_DEFAULT 0x5 /* 5 */
+
+/* Type = rw */
+#define ETH_IPV4_DSCP_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_DSCP_MASK 0x3f /* 63 */
+#define ETH_IPV4_DSCP_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DSCP_WIDTH 0x6 /* 6 */
+#define ETH_IPV4_DSCP_DEFAULT 0x2e /* 46 */
+
+/* Type = rw */
+#define ETH_IPV4_ECN_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_ECN_MASK 0xc0 /* 192 */
+#define ETH_IPV4_ECN_OFFSET 0x6 /* 6 */
+#define ETH_IPV4_ECN_WIDTH 0x2 /* 2 */
+#define ETH_IPV4_ECN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_ID_ADDR 0xa038 /* 41016 */
+#define ETH_IPV4_ID_MASK 0xffff /* 65535 */
+#define ETH_IPV4_ID_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_ID_WIDTH 0x10 /* 16 */
+#define ETH_IPV4_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_FLAGS_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FLAGS_MASK 0x7 /* 7 */
+#define ETH_IPV4_FLAGS_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_FLAGS_WIDTH 0x3 /* 3 */
+#define ETH_IPV4_FLAGS_DEFAULT 0x2 /* 2 */
+
+/* Type = rw */
+#define ETH_IPV4_FRAGMENT_OFFSET_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FRAGMENT_OFFSET_MASK 0x1fff8 /* 131064 */
+#define ETH_IPV4_FRAGMENT_OFFSET_OFFSET 0x3 /* 3 */
+#define ETH_IPV4_FRAGMENT_OFFSET_WIDTH 0xe /* 14 */
+#define ETH_IPV4_FRAGMENT_OFFSET_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_TIME_TO_LIVE_ADDR 0xa040 /* 41024 */
+#define ETH_IPV4_TIME_TO_LIVE_MASK 0xff /* 255 */
+#define ETH_IPV4_TIME_TO_LIVE_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_TIME_TO_LIVE_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_TIME_TO_LIVE_DEFAULT 0x40 /* 64 */
+
+/* Type = rw */
+#define ETH_IPV4_PROTOCOL_ADDR 0xa044 /* 41028 */
+#define ETH_IPV4_PROTOCOL_MASK 0xff /* 255 */
+#define ETH_IPV4_PROTOCOL_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_PROTOCOL_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_PROTOCOL_DEFAULT 0x11 /* 17 */
+
+/* Type = rwpdef */
+#define ETH_IPV4_SOURCE_ADD_ADDR 0xa048 /* 41032 */
+#define ETH_IPV4_SOURCE_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_SOURCE_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_SOURCE_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_SOURCE_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV4_DESTINATION_ADD_ADDR 0xa04c /* 41036 */
+#define ETH_IPV4_DESTINATION_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_DESTINATION_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DESTINATION_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_DESTINATION_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_UDP_SOURCE_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_SOURCE_PORT_MASK 0xffff /* 65535 */
+#define ETH_UDP_SOURCE_PORT_OFFSET 0x0 /* 0 */
+#define ETH_UDP_SOURCE_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_SOURCE_PORT_DEFAULT 0x8000 /* 32768 */
+
+/* Type = rw */
+#define ETH_UDP_DESTINATION_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_DESTINATION_PORT_MASK 0xffff0000 /* 4294901760 */
+#define ETH_UDP_DESTINATION_PORT_OFFSET 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_DEFAULT 0xc000 /* 49152 */
+
+/* Type = rw */
+#define ETH_IPV6_V_ADDR 0xa080 /* 41088 */
+#define ETH_IPV6_V_MASK 0xf /* 15 */
+#define ETH_IPV6_V_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_V_WIDTH 0x4 /* 4 */
+#define ETH_IPV6_V_DEFAULT 0x6 /* 6 */
+
+/* Type = rw */
+#define ETH_IPV6_TRAFFIC_CLASS_ADDR 0xa084 /* 41092 */
+#define ETH_IPV6_TRAFFIC_CLASS_MASK 0xff /* 255 */
+#define ETH_IPV6_TRAFFIC_CLASS_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_TRAFFIC_CLASS_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_TRAFFIC_CLASS_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV6_FLOW_LABEL_ADDR 0xa088 /* 41096 */
+#define ETH_IPV6_FLOW_LABEL_MASK 0xfffff /* 1048575 */
+#define ETH_IPV6_FLOW_LABEL_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_FLOW_LABEL_WIDTH 0x14 /* 20 */
+#define ETH_IPV6_FLOW_LABEL_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV6_NEXT_HEADER_ADDR 0xa08c /* 41100 */
+#define ETH_IPV6_NEXT_HEADER_MASK 0xff /* 255 */
+#define ETH_IPV6_NEXT_HEADER_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_NEXT_HEADER_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_NEXT_HEADER_DEFAULT 0x11 /* 17 */
+
+/* Type = rw */
+#define ETH_IPV6_HOP_LIMIT_ADDR 0xa090 /* 41104 */
+#define ETH_IPV6_HOP_LIMIT_MASK 0xff /* 255 */
+#define ETH_IPV6_HOP_LIMIT_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_HOP_LIMIT_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_HOP_LIMIT_DEFAULT 0x40 /* 64 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_31_0_ADDR 0xa094 /* 41108 */
+#define ETH_IPV6_SOURCE_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_63_32_ADDR 0xa098 /* 41112 */
+#define ETH_IPV6_SOURCE_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_95_64_ADDR 0xa09c /* 41116 */
+#define ETH_IPV6_SOURCE_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_127_96_ADDR 0xa0a0 /* 41120 */
+#define ETH_IPV6_SOURCE_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_31_0_ADDR 0xa0a4 /* 41124 */
+#define ETH_IPV6_DEST_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_63_32_ADDR 0xa0a8 /* 41128 */
+#define ETH_IPV6_DEST_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_95_64_ADDR 0xa0ac /* 41132 */
+#define ETH_IPV6_DEST_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_127_96_ADDR 0xa0b0 /* 41136 */
+#define ETH_IPV6_DEST_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_stats
+ * with prefix stats_ @ address 0xc000
+ *------------------------------------------------------------------------------
+ */
+/* Type = roSig */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR 0xc000 /* 49152 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_ADDR 0xc004 /* 49156 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_ADDR 0xc008 /* 49160 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_PACKETS_CNT_ADDR 0xc00c /* 49164 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR 0xc010 /* 49168 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR 0xc014 /* 49172 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR 0xc018 /* 49176 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_ADDR 0xc01c /* 49180 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR 0xc020 /* 49184 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR 0xc024 /* 49188 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR 0xc028 /* 49192 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_PKTS_RATE_ADDR 0xc02c /* 49196 */
+#define STATS_USER_DATA_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_PKTS_RATE_ADDR 0xc030 /* 49200 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
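+
+#endif /* ROE_FRAMER_CTRL_H_ */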
diff --git a/drivers/staging/xroeframer/sysfs_xroe.c b/drivers/staging/xroeframer/sysfs_xroe.c
new file mode 100644
index 000000000000..9caf5e50b02f
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+/**
+ * version_show - Returns the block's revision number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ *
+ * Returns the block's major, minor & version revision numbers
+ * in a %d.%d.%d format
+ *
+ * Return: the number of characters printed
+ */
+static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 major_rev;
+ u32 minor_rev;
+ u32 version_rev;
+
+ major_rev = utils_sysfs_show_wrapper(CFG_MAJOR_REVISION_ADDR,
+ CFG_MAJOR_REVISION_OFFSET,
+ CFG_MAJOR_REVISION_MASK, kobj);
+ minor_rev = utils_sysfs_show_wrapper(CFG_MINOR_REVISION_ADDR,
+ CFG_MINOR_REVISION_OFFSET,
+ CFG_MINOR_REVISION_MASK, kobj);
+ version_rev = utils_sysfs_show_wrapper(CFG_VERSION_REVISION_ADDR,
+ CFG_VERSION_REVISION_OFFSET,
+ CFG_VERSION_REVISION_MASK, kobj);
+	return sprintf(buff, "%d.%d.%d\n", major_rev, minor_rev, version_rev);
+}
+
+/**
+ * version_store - Writes to the framer version sysfs entry (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the framer version sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t version_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * enable_show - Returns the framer's enable status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ *
+ * Reads the framer's enable status and prints it to the sysfs entry
+ *
+ * Return: the number of characters printed
+ */
+static ssize_t enable_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 enable;
+
+ enable = utils_sysfs_show_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, kobj);
+	return sprintf(buff, "%s\n", enable ? "true" : "false");
+}
+
+/**
+ * enable_store - Writes to the framer's enable status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and updates the framer's enable status
+ * register accordingly
+ *
+ * Return: "count" on success, -EINVAL on invalid input
+ */
+static ssize_t enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+	u32 enable;
+
+	/* sysfs_streq() also matches input with a trailing newline */
+	if (sysfs_streq(buff, "true"))
+		enable = 1;
+	else if (sysfs_streq(buff, "false"))
+		enable = 0;
+	else
+		return -EINVAL;
+	utils_sysfs_store_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+				  CFG_MASTER_INT_ENABLE_OFFSET,
+				  CFG_MASTER_INT_ENABLE_MASK, enable, kobj);
+	return count;
+}
+
+/**
+ * framer_restart_show - Returns the framer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads the framer's restart status and prints it to the sysfs entry
+ *
+ * Return: the number of characters printed
+ */
+static ssize_t framer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 restart;
+
+ restart = utils_sysfs_show_wrapper(FRAM_DISABLE_ADDR,
+ FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, kobj);
+	return sprintf(buff, "%s\n", restart ? "true" : "false");
+}
+
+/**
+ * framer_restart_store - Writes to the framer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and updates the framer's restart status
+ * register accordingly
+ *
+ * Return: "count" on success, -EINVAL on invalid input
+ */
+static ssize_t framer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+	u32 restart;
+
+	if (sysfs_streq(buff, "true"))
+		restart = 0x01;
+	else if (sysfs_streq(buff, "false"))
+		restart = 0x00;
+	else
+		return -EINVAL;
+	utils_sysfs_store_wrapper(FRAM_DISABLE_ADDR, FRAM_DISABLE_OFFSET,
+				  FRAM_DISABLE_MASK, restart, kobj);
+	return count;
+}
+
+/**
+ * deframer_restart_show - Returns the deframer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads the deframer's restart status and prints it to the sysfs entry
+ *
+ * Return: the number of characters printed
+ */
+static ssize_t deframer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+
+	return sprintf(buff, "%s\n", restart ? "true" : "false");
+}
+
+/**
+ * deframer_restart_store - Writes to the deframer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and updates the deframer's restart status
+ * register accordingly
+ *
+ * Return: "count" on success, -EINVAL on invalid input
+ */
+static ssize_t deframer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+	u32 restart;
+
+	if (sysfs_streq(buff, "true"))
+		restart = 0x01;
+	else if (sysfs_streq(buff, "false"))
+		restart = 0x00;
+	else
+		return -EINVAL;
+	utils_write32withmask(working_address, restart, mask, offset);
+	return count;
+}
+
+/**
+ * xxv_reset_show - Returns the XXV's reset status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads the XXV's reset status and prints it to the sysfs entry
+ *
+ * Return: the number of characters printed
+ */
+static ssize_t xxv_reset_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+	return sprintf(buff, "%s\n", restart ? "true" : "false");
+}
+
+/**
+ * xxv_reset_store - Writes to the XXV's reset register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and updates the XXV's reset status
+ * register accordingly
+ *
+ * Return: "count" on success, -EINVAL on invalid input
+ */
+static ssize_t xxv_reset_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+	u32 restart;
+
+	if (sysfs_streq(buff, "true"))
+		restart = 0x01;
+	else if (sysfs_streq(buff, "false"))
+		restart = 0x00;
+	else
+		return -EINVAL;
+	utils_write32withmask(working_address, restart, mask, offset);
+	return count;
+}
+
+/**
+ * framing_show - Returns the current framing
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the framing type
+ *
+ * Reads the current framing type and prints it to the sysfs entry
+ *
+ * Return: the number of characters printed
+ */
+static ssize_t framing_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+	u8 framing;
+	void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+	framing = ioread8(working_address);
+	if (framing == 0)
+		return sprintf(buff, "eCPRI\n");
+	if (framing == 1)
+		return sprintf(buff, "1914.3\n");
+	return sprintf(buff, "unknown (%u)\n", framing);
+}
+
+/**
+ * framing_store - Writes to the current framing register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the framing type
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the current framing
+ * to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t framing_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "eCPRI", xroe_size) == 0)
+ iowrite8(0, working_address);
+ else if (strncmp(xroe_tmp, "1914.3", xroe_size) == 0)
+ iowrite8(1, working_address);
+ return xroe_size;
+}
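
Writing the literal strings "eCPRI" or "1914.3" selects message type 0 or 1 respectively; any other input leaves the register untouched but is still reported back as consumed.
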
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, version_show, version_store);
+
+static struct kobj_attribute enable_attribute =
+ __ATTR(enable, 0660, enable_show, enable_store);
+
+static struct kobj_attribute framer_restart =
+ __ATTR(framer_restart, 0660, framer_restart_show, framer_restart_store);
+
+static struct kobj_attribute deframer_restart =
+ __ATTR(deframer_restart, 0660, deframer_restart_show,
+ deframer_restart_store);
+
+static struct kobj_attribute xxv_reset =
+ __ATTR(xxv_reset, 0660, xxv_reset_show, xxv_reset_store);
+
+static struct kobj_attribute framing_attribute =
+ __ATTR(framing, 0660, framing_show, framing_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &enable_attribute.attr,
+ &framer_restart.attr,
+ &deframer_restart.attr,
+ &xxv_reset.attr,
+ &framing_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *root_xroe_kobj;
+
+/**
+ * xroe_sysfs_init - Creates the xroe sysfs directory and entries
+ *
+ * Creates the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_init(void)
+{
+ int ret;
+
+ root_xroe_kobj = kobject_create_and_add("xroe", kernel_kobj);
+ if (!root_xroe_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(root_xroe_kobj, &attr_group);
+ if (ret) {
+ kobject_put(root_xroe_kobj);
+ return ret;
+ }
+ ret = xroe_sysfs_ipv4_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_ipv6_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_udp_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_stats_init();
+ return ret;
+}
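
For orientation, the hierarchy built by this init path looks roughly as follows (entry names taken from the attribute tables in this patch; the udp entries come from xroe_sysfs_udp_init(), which follows the same pattern):

    /sys/kernel/xroe/
        version  enable  framer_restart  deframer_restart  xxv_reset  framing
        framer/eth_port_<N>/ipv4/   (version, ihl, dscp, ecn, id, flags, ...)
        framer/eth_port_<N>/ipv6/   (version, traffic_class, flow_label, ...)
        stats/                      (total_rx_good_pkt, rx_user_pkt_rate, ...)
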
+
+/**
+ * xroe_sysfs_exit - Deletes the xroe sysfs directory and entries
+ *
+ * Deletes the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ */
+void xroe_sysfs_exit(void)
+{
+ int i;
+
+ xroe_sysfs_ipv4_exit();
+ xroe_sysfs_ipv6_exit();
+ xroe_sysfs_udp_exit();
+ xroe_sysfs_stats_exit();
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_eth_ports[i]);
+ kobject_put(kobj_framer);
+ kobject_put(root_xroe_kobj);
+}
+
+/**
+ * utils_write32withmask - Writes a masked 32-bit value
+ * @working_address: The address of the register to write to
+ * @value: The value to be written
+ * @mask: The mask selecting the field to update
+ * @offset: The bit offset of the field within the register
+ *
+ * Read-modify-writes a 32-bit register: only the bits selected by @mask are
+ * updated, with @value shifted into place by @offset
+ *
+ * Return: 0 on success
+ */
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset)
+{
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0, buffer = 0;
+
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+ return 0;
+}
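
The helper is a plain read-modify-write: @value is shifted into field position, the field's bits are cleared in the old register value, and the new bits are merged in. A minimal user-space sketch of the same bit arithmetic, on ordinary memory rather than MMIO (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Same field-update logic as utils_write32withmask(), minus the I/O. */
    static uint32_t field_write(uint32_t reg, uint32_t value,
                                uint32_t mask, uint32_t offset)
    {
        uint32_t shifted = value << offset; /* move value into field position */

        return (reg & ~mask) | (shifted & mask); /* clear field, merge new bits */
    }

    int main(void)
    {
        /* set a one-bit field at bit 1 (mask 0x2) without touching bit 0 */
        uint32_t reg = 0x00000001;

        reg = field_write(reg, 1, 0x2, 1);
        printf("0x%08x\n", reg); /* prints 0x00000003 */
        return 0;
    }
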
+
+/**
+ * utils_sysfs_path_to_eth_port_num - Get the current ethernet port
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Extracts the number of the current ethernet port instance
+ *
+ * Return: The number of the ethernet port instance (0 to
+ * MAX_NUM_ETH_PORTS - 1); 0 if no port number is found in the path
+ */
+static int utils_sysfs_path_to_eth_port_num(struct kobject *kobj)
+{
+ char *current_path = NULL;
+ int port;
+ int ret;
+
+ current_path = kobject_get_path(kobj, GFP_KERNEL);
+ ret = sscanf(current_path, "/kernel/xroe/framer/eth_port_%d/", &port);
+ /* if sscanf() did not assign the port field (it returns 0 or EOF
+ * for paths without an eth_port component), default to port 0
+ */
+ if (ret != 1)
+ port = 0;
+ kfree(current_path);
+ return port;
+}
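
kobject_get_path() returns a path such as "/kernel/xroe/framer/eth_port_2/ipv4" for a per-port entry, and sscanf() lifts the port index out of it. A user-space sketch of the same parse (sample paths are illustrative):

    #include <stdio.h>

    int main(void)
    {
        const char *paths[] = {
            "/kernel/xroe/framer/eth_port_2/ipv4",
            "/kernel/xroe/framer", /* no port component */
        };

        for (int i = 0; i < 2; i++) {
            int port = 0;

            /* same format string as the driver; 0 or EOF => default port 0 */
            if (sscanf(paths[i],
                       "/kernel/xroe/framer/eth_port_%d/", &port) != 1)
                port = 0;
            printf("%-40s -> port %d\n", paths[i], port);
        }
        return 0;
    }
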
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @address: The address of the register to be written
+ * @offset: The bit offset of the field within the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is written to the designated register
+ *
+ */
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ utils_write32withmask(working_address, value, mask, offset);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the reading function for sysfs entries
+ * @address: The address of the register to be read
+ * @offset: The bit offset of the field within the register
+ * @mask: The mask to be used on the value to be read
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is read from the designated register and returned.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+ u32 buffer;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
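
Both wrappers resolve the per-port register as base + address + 0x100 * port, i.e. each Ethernet port's register block is assumed to repeat at a 0x100-byte stride. A quick sketch of that address arithmetic (the register offset below is made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_STRIDE 0x100 /* per-port block spacing, as in the driver */

    int main(void)
    {
        uint32_t reg = 0x28; /* hypothetical per-port register offset */

        for (int port = 0; port < 4; port++)
            printf("port %d -> base + 0x%03x\n",
                   port, reg + (uint32_t)(PORT_STRIDE * port));
        return 0;
    }
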
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
new file mode 100644
index 000000000000..aaaefb10c597
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes);
+static int utils_ipv4addr_chartohex(char *ip_addr, uint32_t *p_ip_addr);
+
+/**
+ * ipv4_version_show - Returns the IPv4 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version number
+ *
+ * Returns the IPv4 version number
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV4_VERSION_ADDR,
+ ETH_IPV4_VERSION_OFFSET,
+ ETH_IPV4_VERSION_MASK, kobj);
+ sprintf(buff, "%d\n", version);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_version_store - Writes to the IPv4 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv4_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv4_ihl_show - Returns the IPv4 IHL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ *
+ * Returns the IPv4 IHL
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ihl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ihl;
+
+ ihl = utils_sysfs_show_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, kobj);
+ sprintf(buff, "%d\n", ihl);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ihl_store - Writes to the IPv4 IHL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 IHL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_ihl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ihl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ihl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, ihl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_dscp_show - Returns the IPv4 DSCP
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ *
+ * Returns the IPv4 DSCP
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_dscp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 dscp;
+
+ dscp = utils_sysfs_show_wrapper(ETH_IPV4_DSCP_ADDR,
+ ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, kobj);
+ sprintf(buff, "%d\n", dscp);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_dscp_store - Writes to the IPv4 DSCP sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 DSCP sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_dscp_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 dscp;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dscp);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_DSCP_ADDR, ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, dscp, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ecn_show - Returns the IPv4 ECN
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ *
+ * Returns the IPv4 ECN
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ecn_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ecn;
+
+ ecn = utils_sysfs_show_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, kobj);
+ sprintf(buff, "%d\n", ecn);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ecn_store - Writes to the IPv4 ECN sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ECN sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_ecn_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ecn;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ecn);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, ecn, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_id_show - Returns the IPv4 ID
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ *
+ * Returns the IPv4 ID
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 id;
+
+ id = utils_sysfs_show_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, kobj);
+ sprintf(buff, "%d\n", id);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_id_store - Writes to the IPv4 ID sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ID sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_id_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 id;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &id);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, id, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_flags_show - Returns the IPv4 flags
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ *
+ * Returns the IPv4 flags
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_flags_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flags;
+
+ flags = utils_sysfs_show_wrapper(ETH_IPV4_FLAGS_ADDR,
+ ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, kobj);
+ sprintf(buff, "%d\n", flags);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_flags_store - Writes to the IPv4 flags sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 flags sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_flags_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 flags;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flags);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FLAGS_ADDR, ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, flags, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_fragment_offset_show - Returns the IPv4 fragment offset
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ *
+ * Returns the IPv4 fragment offset
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_fragment_offset_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 fragment;
+
+ fragment = utils_sysfs_show_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK,
+ kobj);
+ sprintf(buff, "%d\n", fragment);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_fragment_offset_store - Writes to the IPv4 fragment offset sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 fragment offset sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_fragment_offset_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 fragment;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &fragment);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK, fragment,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ttl_show - Returns the IPv4 TTL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ *
+ * Returns the IPv4 TTL
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ttl_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 ttl;
+
+ ttl = utils_sysfs_show_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, kobj);
+ sprintf(buff, "%d\n", ttl);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ttl_store - Writes to the IPv4 TTL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 TTL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_ttl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ttl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ttl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, ttl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_protocol_show - Returns the IPv4 protocol
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ *
+ * Returns the IPv4 protocol
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_protocol_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 protocol;
+
+ protocol = utils_sysfs_show_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, kobj);
+ sprintf(buff, "%d\n", protocol);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_protocol_store - Writes to the IPv4 protocol sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 protocol sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_protocol_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 protocol;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &protocol);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, protocol, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_source_address_show - Returns the IPv4 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ *
+ * Returns the IPv4 source address in x.x.x.x format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source_add = 0;
+ unsigned char ip_addr_char[4];
+
+ source_add = utils_sysfs_show_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, kobj);
+ utils_ipv4addr_hextochar(source_add, ip_addr_char);
+ sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_source_address_store - Writes to the IPv4 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv4addr_chartohex(xroe_tmp, &source_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, source_add,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_destination_address_show - Returns the IPv4 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ *
+ * Returns the IPv4 destination address in x.x.x.x format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 dest_add = 0;
+ unsigned char ip_addr_char[4];
+
+ dest_add = utils_sysfs_show_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+ ETH_IPV4_DESTINATION_ADD_OFFSET,
+ ETH_IPV4_DESTINATION_ADD_MASK,
+ kobj);
+ utils_ipv4addr_hextochar(dest_add, ip_addr_char);
+ sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_destination_address_store - Writes to the IPv4 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 destination address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv4_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 dest_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv4addr_chartohex(xroe_tmp, &dest_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+ ETH_IPV4_DESTINATION_ADD_OFFSET,
+ ETH_IPV4_DESTINATION_ADD_MASK,
+ dest_add, kobj);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, ipv4_version_show, ipv4_version_store);
+static struct kobj_attribute ihl_attribute =
+ __ATTR(ihl, 0660, ipv4_ihl_show, ipv4_ihl_store);
+static struct kobj_attribute dscp_attribute =
+ __ATTR(dscp, 0660, ipv4_dscp_show, ipv4_dscp_store);
+static struct kobj_attribute ecn_attribute =
+ __ATTR(ecn, 0660, ipv4_ecn_show, ipv4_ecn_store);
+static struct kobj_attribute id_attribute =
+ __ATTR(id, 0660, ipv4_id_show, ipv4_id_store);
+static struct kobj_attribute flags_attribute =
+ __ATTR(flags, 0660, ipv4_flags_show, ipv4_flags_store);
+static struct kobj_attribute fragment_offset_attribute =
+ __ATTR(fragment_offset, 0660, ipv4_fragment_offset_show,
+ ipv4_fragment_offset_store);
+static struct kobj_attribute ttl_attribute =
+ __ATTR(ttl, 0660, ipv4_ttl_show, ipv4_ttl_store);
+static struct kobj_attribute protocol_attribute =
+ __ATTR(protocol, 0660, ipv4_protocol_show, ipv4_protocol_store);
+static struct kobj_attribute source_add_attribute =
+ __ATTR(source_add, 0660, ipv4_source_address_show,
+ ipv4_source_address_store);
+static struct kobj_attribute destination_add_attribute =
+ __ATTR(dest_add, 0660, ipv4_destination_address_show,
+ ipv4_destination_address_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &ihl_attribute.attr,
+ &dscp_attribute.attr,
+ &ecn_attribute.attr,
+ &id_attribute.attr,
+ &flags_attribute.attr,
+ &fragment_offset_attribute.attr,
+ &ttl_attribute.attr,
+ &protocol_attribute.attr,
+ &source_add_attribute.attr,
+ &destination_add_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *kobj_framer;
+static struct kobject *kobj_ipv4[MAX_NUM_ETH_PORTS];
+struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv4_init - Creates the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Creates the xroe sysfs "ipv4" subdirectory and entries under "xroe"
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_ipv4_init(void)
+{
+ int ret;
+ int i;
+ char eth_port_dir_name[11];
+
+ kobj_framer = kobject_create_and_add("framer", root_xroe_kobj);
+ if (!kobj_framer)
+ return -ENOMEM;
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ snprintf(eth_port_dir_name, sizeof(eth_port_dir_name),
+ "eth_port_%d", i);
+ kobj_eth_ports[i] = kobject_create_and_add(eth_port_dir_name,
+ kobj_framer);
+ if (!kobj_eth_ports[i])
+ return -ENOMEM;
+ kobj_ipv4[i] = kobject_create_and_add("ipv4",
+ kobj_eth_ports[i]);
+ if (!kobj_ipv4[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_ipv4[i], &attr_group);
+ if (ret) {
+ kobject_put(kobj_ipv4[i]);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * xroe_sysfs_ipv4_exit - Deletes the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv4" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_ipv4_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_ipv4[i]);
+}
+
+/**
+ * utils_ipv4addr_hextochar - Integer to char array for IPv4 addresses
+ * @ip: The IP address in integer format
+ * @bytes: The IP address in a 4-byte array
+ *
+ * Converts an IPv4 address given in unsigned integer format to a 4-byte array
+ */
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes)
+{
+ bytes[0] = ip & 0xFF;
+ bytes[1] = (ip >> 8) & 0xFF;
+ bytes[2] = (ip >> 16) & 0xFF;
+ bytes[3] = (ip >> 24) & 0xFF;
+}
+
+/**
+ * utils_ipv4addr_chartohex - Character array to unsigned integer for IPv4 addresses
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv4 address
+ *
+ * Converts an IPv4 address given as a character array to integer format
+ *
+ * Return: 4 (the number of octets parsed) on success,
+ * a value other than 4 in case of wrong input
+ */
+static int utils_ipv4addr_chartohex(char *ip_addr, uint32_t *p_ip_addr)
+{
+ int count = 0, ret = -1;
+ char *string;
+ unsigned char *found;
+ u32 byte_array[4];
+ u32 byte = 0;
+
+ string = ip_addr;
+ while ((found = (unsigned char *)strsep(&string, ".")) != NULL) {
+ if (count <= 4) {
+ ret = kstrtouint(found, 10, &byte);
+ if (ret)
+ return ret;
+ byte_array[count] = byte;
+ } else {
+ break;
+ }
+ count++;
+ }
+
+ if (count == 4) {
+ ret = count;
+ *p_ip_addr = byte_array[3] | (byte_array[2] << 8)
+ | (byte_array[1] << 16) | (byte_array[0] << 24);
+ }
+ return ret;
+}
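
The two helpers are inverses of each other: chartohex packs the first octet into bits 31..24, and the _show path prints bytes[3] down to bytes[0] to undo the least-significant-byte-first split done by hextochar. A user-space round trip, assuming the same packing:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* "192.168.1.10" packed as in the driver: first octet is the MSB */
        uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8) | 10u;
        unsigned char bytes[4];

        /* utils_ipv4addr_hextochar() equivalent: least significant byte first */
        for (int i = 0; i < 4; i++)
            bytes[i] = (ip >> (8 * i)) & 0xFF;

        /* the _show path prints bytes[3]..bytes[0], restoring dotted order */
        printf("0x%08x -> %d.%d.%d.%d\n", ip,
               bytes[3], bytes[2], bytes[1], bytes[0]);
        return 0;
    }
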
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
new file mode 100644
index 000000000000..c26eae426cc1
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 60 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+static void utils_ipv6addr_32to16(u32 *ip32, uint16_t *ip16);
+static int utils_ipv6addr_chartohex(char *ip_addr, uint32_t *p_ip_addr);
+
+/**
+ * ipv6_version_show - Returns the IPv6 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version number
+ *
+ * Returns the IPv6 version number
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV6_V_ADDR, ETH_IPV6_V_OFFSET,
+ ETH_IPV6_V_MASK, kobj);
+ sprintf(buff, "%d\n", version);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_version_store - Writes to the IPv6 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv6_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv6_traffic_class_show - Returns the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ *
+ * Returns the IPv6 traffic class
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_traffic_class_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 traffic_class;
+
+ traffic_class = utils_sysfs_show_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+ ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+ ETH_IPV6_TRAFFIC_CLASS_MASK,
+ kobj);
+ sprintf(buff, "%d\n", traffic_class);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_traffic_class_store - Writes to the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 traffic class sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv6_traffic_class_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 traffic_class;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &traffic_class);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+ ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+ ETH_IPV6_TRAFFIC_CLASS_MASK, traffic_class,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_flow_label_show - Returns the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ *
+ * Returns the IPv6 flow label
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_flow_label_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flow_label;
+
+ flow_label = utils_sysfs_show_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+ ETH_IPV6_FLOW_LABEL_OFFSET,
+ ETH_IPV6_FLOW_LABEL_MASK, kobj);
+ sprintf(buff, "%d\n", flow_label);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_flow_label_store - Writes to the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 flow label sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv6_flow_label_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 flow_label;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flow_label);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+ ETH_IPV6_FLOW_LABEL_OFFSET,
+ ETH_IPV6_FLOW_LABEL_MASK, flow_label, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_next_header_show - Returns the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ *
+ * Returns the IPv6 next header
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_next_header_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 next_header;
+
+ next_header = utils_sysfs_show_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+ ETH_IPV6_NEXT_HEADER_OFFSET,
+ ETH_IPV6_NEXT_HEADER_MASK, kobj);
+ sprintf(buff, "%d\n", next_header);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_next_header_store - Writes to the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 next header sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv6_next_header_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 next_header;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &next_header);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+ ETH_IPV6_NEXT_HEADER_OFFSET,
+ ETH_IPV6_NEXT_HEADER_MASK, next_header, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_hop_limit_show - Returns the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ *
+ * Returns the IPv6 hop limit
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_hop_limit_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 hop_limit;
+
+ hop_limit = utils_sysfs_show_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+ ETH_IPV6_HOP_LIMIT_OFFSET,
+ ETH_IPV6_HOP_LIMIT_MASK, kobj);
+ sprintf(buff, "%d\n", hop_limit);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_hop_limit_store - Writes to the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 hop limit sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv6_hop_limit_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 hop_limit;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &hop_limit);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+ ETH_IPV6_HOP_LIMIT_OFFSET,
+ ETH_IPV6_HOP_LIMIT_MASK, hop_limit, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_source_address_show - Returns the IPv6 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ *
+ * Returns the IPv6 source address in xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
+ * format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source[4];
+ u16 source_add16[8];
+
+ source[0] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+ ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+ ETH_IPV6_SOURCE_ADD_31_0_MASK,
+ kobj);
+ source[1] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+ ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+ ETH_IPV6_SOURCE_ADD_63_32_MASK,
+ kobj);
+ source[2] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+ ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+ ETH_IPV6_SOURCE_ADD_95_64_MASK,
+ kobj);
+ source[3] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+ ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+ ETH_IPV6_SOURCE_ADD_127_96_MASK,
+ kobj);
+
+ utils_ipv6addr_32to16(source, source_add16);
+ sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ source_add16[0], source_add16[1], source_add16[2],
+ source_add16[3],
+ source_add16[4], source_add16[5], source_add16[6],
+ source_add16[7]);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_source_address_store - Writes to the IPv6 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv6_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add[4];
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv6addr_chartohex(xroe_tmp, source_add) == 8) {
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+ ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+ ETH_IPV6_SOURCE_ADD_31_0_MASK,
+ source_add[0], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+ ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+ ETH_IPV6_SOURCE_ADD_63_32_MASK,
+ source_add[1], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+ ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+ ETH_IPV6_SOURCE_ADD_95_64_MASK,
+ source_add[2], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+ ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+ ETH_IPV6_SOURCE_ADD_127_96_MASK,
+ source_add[3], kobj);
+ }
+ return xroe_size;
+}
+
+/**
+ * ipv6_destination_address_show - Returns the IPv6 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ *
+ * Returns the IPv6 destination address in
+ * xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 dest[4];
+ u16 dest_add16[8];
+
+ dest[0] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+ ETH_IPV6_DEST_ADD_31_0_OFFSET,
+ ETH_IPV6_DEST_ADD_31_0_MASK,
+ kobj);
+ dest[1] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+ ETH_IPV6_DEST_ADD_63_32_OFFSET,
+ ETH_IPV6_DEST_ADD_63_32_MASK,
+ kobj);
+ dest[2] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+ ETH_IPV6_DEST_ADD_95_64_OFFSET,
+ ETH_IPV6_DEST_ADD_95_64_MASK,
+ kobj);
+ dest[3] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+ ETH_IPV6_DEST_ADD_127_96_OFFSET,
+ ETH_IPV6_DEST_ADD_127_96_MASK,
+ kobj);
+
+ utils_ipv6addr_32to16(dest, dest_add16);
+ sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ dest_add16[0], dest_add16[1], dest_add16[2],
+ dest_add16[3],
+ dest_add16[4], dest_add16[5], dest_add16[6],
+ dest_add16[7]);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_destination_address_store - Writes to the IPv6 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 destination address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", whichever is smaller, on success
+ */
+static ssize_t ipv6_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 dest_add[4];
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv6addr_chartohex(xroe_tmp, dest_add) == 8) {
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+ ETH_IPV6_DEST_ADD_31_0_OFFSET,
+ ETH_IPV6_DEST_ADD_31_0_MASK,
+ dest_add[0], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+ ETH_IPV6_DEST_ADD_63_32_OFFSET,
+ ETH_IPV6_DEST_ADD_63_32_MASK,
+ dest_add[1], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+ ETH_IPV6_DEST_ADD_95_64_OFFSET,
+ ETH_IPV6_DEST_ADD_95_64_MASK,
+ dest_add[2], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+ ETH_IPV6_DEST_ADD_127_96_OFFSET,
+ ETH_IPV6_DEST_ADD_127_96_MASK,
+ dest_add[3], kobj);
+ }
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, ipv6_version_show, ipv6_version_store);
+static struct kobj_attribute traffic_class =
+ __ATTR(traffic_class, 0660, ipv6_traffic_class_show,
+ ipv6_traffic_class_store);
+static struct kobj_attribute flow_label =
+ __ATTR(flow_label, 0660, ipv6_flow_label_show, ipv6_flow_label_store);
+static struct kobj_attribute next_header =
+ __ATTR(next_header, 0660, ipv6_next_header_show,
+ ipv6_next_header_store);
+static struct kobj_attribute hop_limit =
+ __ATTR(hop_limit, 0660, ipv6_hop_limit_show, ipv6_hop_limit_store);
+static struct kobj_attribute source_add_attribute =
+ __ATTR(source_add, 0660, ipv6_source_address_show,
+ ipv6_source_address_store);
+static struct kobj_attribute dest_add_attribute =
+ __ATTR(dest_add, 0660, ipv6_destination_address_show,
+ ipv6_destination_address_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &traffic_class.attr,
+ &flow_label.attr,
+ &next_header.attr,
+ &hop_limit.attr,
+ &source_add_attribute.attr,
+ &dest_add_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_ipv6[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv6_init - Creates the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Creates the xroe sysfs "ipv6" subdirectory and entries under "xroe"
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_ipv6_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ kobj_ipv6[i] = kobject_create_and_add("ipv6",
+ kobj_eth_ports[i]);
+ if (!kobj_ipv6[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_ipv6[i], &attr_group);
+ if (ret) {
+ kobject_put(kobj_ipv6[i]);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * xroe_sysfs_ipv4_exit - Deletes the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv6" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_ipv6_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_ipv6[i]);
+}
+
+/**
+ * utils_ipv6addr_32to16 - uint32_t to uint16_t for IPv6 addresses
+ * @ip32: The IPv6 address in uint32_t format
+ * @ip16: The IPv6 address in uint16_t format
+ *
+ * Converts an IPv6 address given in uint32_t format to uint16_t
+ */
+static void utils_ipv6addr_32to16(u32 *ip32, uint16_t *ip16)
+{
+ ip16[0] = ip32[0] >> 16;
+ ip16[1] = ip32[0] & 0x0000FFFF;
+ ip16[2] = ip32[1] >> 16;
+ ip16[3] = ip32[1] & 0x0000FFFF;
+ ip16[4] = ip32[2] >> 16;
+ ip16[5] = ip32[2] & 0x0000FFFF;
+ ip16[6] = ip32[3] >> 16;
+ ip16[7] = ip32[3] & 0x0000FFFF;
+}
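
For instance, with ip32[0] = 0xfe800001 the first two output groups are ip16[0] = 0xfe80 and ip16[1] = 0x0001; the _show paths then print the eight groups in that order with "%04x".
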
+
+/**
+ * utils_ipv6addr_chartohex - Character array to uint32_t array for IPv6 addresses
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv6 address
+ *
+ * Converts an IPv6 address given as a character array to integer format
+ *
+ * Return: 8 (the number of 16-bit fields parsed) on success,
+ * a value other than 8 in case of wrong input
+ */
+static int utils_ipv6addr_chartohex(char *ip_addr, uint32_t *p_ip_addr)
+{
+ int ret;
+ int count;
+ char *string;
+ unsigned char *found;
+ u16 ip_array_16[8];
+ u32 field;
+
+ ret = -1;
+ count = 0;
+ string = ip_addr;
+ while ((found = (unsigned char *)strsep(&string, ":")) != NULL) {
+ if (count <= 8) {
+ ret = kstrtouint(found, 16, &field);
+ if (ret)
+ return ret;
+ ip_array_16[count] = (uint16_t)field;
+ } else {
+ break;
+ }
+ count++;
+ }
+ if (count == 8) {
+ p_ip_addr[0] = ip_array_16[1] | (ip_array_16[0] << 16);
+ p_ip_addr[1] = ip_array_16[3] | (ip_array_16[2] << 16);
+ p_ip_addr[2] = ip_array_16[5] | (ip_array_16[4] << 16);
+ p_ip_addr[3] = ip_array_16[7] | (ip_array_16[6] << 16);
+ ret = count;
+ }
+ return ret;
+}
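
Note that the parser expects exactly eight colon-separated hex groups; the compressed "::" notation is not handled. Each adjacent pair of 16-bit groups is then packed into one 32-bit word, high group first. A user-space sketch of that packing step, starting from already-parsed groups:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* the eight 16-bit groups of fe80:0:0:0:0:0:0:1, already parsed */
        uint16_t g[8] = { 0xfe80, 0, 0, 0, 0, 0, 0, 1 };
        uint32_t ip[4];

        /* same packing as utils_ipv6addr_chartohex(): high group on top */
        for (int i = 0; i < 4; i++)
            ip[i] = g[2 * i + 1] | ((uint32_t)g[2 * i] << 16);

        printf("%08x %08x %08x %08x\n", ip[0], ip[1], ip[2], ip[3]);
        return 0;
    }
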
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
new file mode 100644
index 000000000000..063664bb987a
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+/**
+ * total_rx_good_pkt_show - Returns the total good rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_pkt_show - Returns the total bad rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_fcs_show - Returns the total bad fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_FCS_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_pkt_show - Returns the total user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PACKETS_CNT_ADDR,
+ STATS_USER_DATA_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_DATA_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_pkt_show - Returns the total good user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_pkt_show - Returns the total bad user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_fcs_show - Returns the total bad user rx fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_ctrl_pkt_show - Returns the total user rx control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PACKETS_CNT_ADDR,
+ STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_ctrl_pkt_show - Returns the total good user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_pkt_show - Returns the total bad user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_fcs_show - Returns the total bad user rx
+ * control fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user control frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * rx_user_pkt_rate_show - Returns the rate of user packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the rate of user rx packets
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_pkt_rate_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 rate;
+
+ rate = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PKTS_RATE_ADDR,
+ STATS_USER_DATA_RX_PKTS_RATE_OFFSET,
+ STATS_USER_DATA_RX_PKTS_RATE_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", rate);
+}
+
+/**
+ * rx_user_ctrl_pkt_rate_show - Returns the rate of user control packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the rate of user rx control packets
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_ctrl_pkt_rate_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 rate;
+
+ rate = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PKTS_RATE_ADDR,
+ STATS_USER_CTRL_RX_PKTS_RATE_OFFSET,
+ STATS_USER_CTRL_RX_PKTS_RATE_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", rate);
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+static struct kobj_attribute total_rx_good_pkt_attribute =
+ __ATTR(total_rx_good_pkt, 0444, total_rx_good_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_pkt_attribute =
+ __ATTR(total_rx_bad_pkt, 0444, total_rx_bad_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_fcs_attribute =
+ __ATTR(total_rx_bad_fcs, 0444, total_rx_bad_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_pkt_attribute =
+ __ATTR(total_rx_user_pkt, 0444, total_rx_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_pkt_attribute =
+ __ATTR(total_rx_good_user_pkt, 0444, total_rx_good_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_pkt_attribute =
+ __ATTR(total_rx_bad_user_pkt, 0444, total_rx_bad_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_fcs_attribute =
+ __ATTR(total_rx_bad_user_fcs, 0444, total_rx_bad_user_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_user_ctrl_pkt, 0444, total_rx_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_good_user_ctrl_pkt, 0444,
+ total_rx_good_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_bad_user_ctrl_pkt, 0444,
+ total_rx_bad_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_fcs_attribute =
+ __ATTR(total_rx_bad_user_ctrl_fcs, 0444,
+ total_rx_bad_user_ctrl_fcs_show, NULL);
+static struct kobj_attribute rx_user_pkt_rate_attribute =
+ __ATTR(rx_user_pkt_rate, 0444, rx_user_pkt_rate_show, NULL);
+static struct kobj_attribute rx_user_ctrl_pkt_rate_attribute =
+ __ATTR(rx_user_ctrl_pkt_rate, 0444, rx_user_ctrl_pkt_rate_show, NULL);
+
+static struct attribute *attrs[] = {
+ &total_rx_good_pkt_attribute.attr,
+ &total_rx_bad_pkt_attribute.attr,
+ &total_rx_bad_fcs_attribute.attr,
+ &total_rx_user_pkt_attribute.attr,
+ &total_rx_good_user_pkt_attribute.attr,
+ &total_rx_bad_user_pkt_attribute.attr,
+ &total_rx_bad_user_fcs_attribute.attr,
+ &total_rx_user_ctrl_pkt_attribute.attr,
+ &total_rx_good_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_fcs_attribute.attr,
+ &rx_user_pkt_rate_attribute.attr,
+ &rx_user_ctrl_pkt_rate_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *kobj_stats;
+
+/**
+ * xroe_sysfs_stats_init - Creates the xroe sysfs "stats" subdirectory & entries
+ *
+ * Creates the xroe sysfs "stats" subdirectory and entries under "xroe"
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_stats_init(void)
+{
+ int ret;
+
+ kobj_stats = kobject_create_and_add("stats", root_xroe_kobj);
+ if (!kobj_stats)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(kobj_stats, &attr_group);
+ if (ret)
+ kobject_put(kobj_stats);
+
+ return ret;
+}
+
+/**
+ * xroe_sysfs_stats_exit - Deletes the xroe sysfs "stats" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "stats" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_stats_exit(void)
+{
+ kobject_put(kobj_stats);
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
new file mode 100644
index 000000000000..8f8a77b25da7
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+
+/**
+ * udp_source_port_show - Returns the UDP source port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the UDP source port
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t udp_source_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 source_port;
+
+ source_port = utils_sysfs_show_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, kobj);
+ return sprintf(buff, "%d\n", source_port);
+}
+
+/**
+ * udp_source_port_store - Writes to the UDP source port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP source port sysfs entry
+ *
+ * Return: The lesser of "count" and XROE_SIZE_MAX on success
+ */
+static ssize_t udp_source_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 source_port;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &source_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, source_port, kobj);
+ return xroe_size;
+}
+
+/**
+ * udp_destination_port_show - Returns the UDP destination port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the UDP destination port
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t udp_destination_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 dest_port;
+
+ dest_port = utils_sysfs_show_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", dest_port);
+}
+
+/**
+ * udp_destination_port_store - Writes to the UDP destination port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP destination port sysfs entry
+ *
+ * Return: The lesser of "count" and XROE_SIZE_MAX on success
+ */
+static ssize_t udp_destination_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 dest_port;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dest_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK, dest_port,
+ kobj);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute source_port =
+ __ATTR(source_port, 0660, udp_source_port_show,
+ udp_source_port_store);
+static struct kobj_attribute dest_port =
+ __ATTR(dest_port, 0660, udp_destination_port_show,
+ udp_destination_port_store);
+
+static struct attribute *attrs[] = {
+ &source_port.attr,
+ &dest_port.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_udp[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_udp_init - Creates the xroe sysfs "udp" subdirectory and entries
+ *
+ * Creates the xroe sysfs "udp" subdirectory and entries under "xroe"
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_udp_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ kobj_udp[i] = kobject_create_and_add("udp", kobj_eth_ports[i]);
+ if (!kobj_udp[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_udp[i], &attr_group);
+ if (ret) {
+ kobject_put(kobj_udp[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * xroe_sysfs_udp_exit - Deletes the xroe sysfs "udp" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "udp" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_udp_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_udp[i]);
+}
diff --git a/drivers/staging/xroeframer/xroe_framer.c b/drivers/staging/xroeframer/xroe_framer.c
new file mode 100644
index 000000000000..dba7c69b010f
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "xroe_framer.h"
+
+#define DRIVER_NAME "framer"
+
+/*
+ * TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the "lp" structure is shared among the multiple source files
+ */
+struct framer_local *lp;
+static struct platform_driver framer_driver;
+/*
+ * TODO: placeholder for the IRQ once it's been implemented
+ * in the framer block
+ */
+static irqreturn_t framer_irq(int irq, void *lp)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * framer_probe - Probes the device tree to locate the framer block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the framer block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int framer_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem; /* IO mem resources */
+ struct resource *r_irq;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+
+ dev_dbg(dev, "Device Tree Probing\n");
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+ rc = xroe_sysfs_init();
+ if (rc)
+ return rc;
+ /* Get IRQ for the device */
+ /*
+ * TODO: No IRQ *yet* in the DT from the framer block, as it's still
+ * under development. To be added once it's in the block, and also
+ * replace with platform_get_irq_byname()
+ */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r_irq) {
+ dev_info(dev, "no IRQ found\n");
+ /*
+ * TODO: Return non-zero (error) code on no IRQ found.
+ * To be implemented once the IRQ is in the block
+ */
+ return 0;
+ }
+ lp->irq = r_irq->start;
+ rc = devm_request_irq(dev, lp->irq, &framer_irq, 0, DRIVER_NAME, lp);
+ if (rc) {
+ dev_err(dev, "could not allocate interrupt %d\n",
+ lp->irq);
+ /*
+ * TODO: Return non-zero (error) code on no IRQ found.
+ * To be implemented once the IRQ is in the block
+ */
+ return 0;
+ }
+
+ return rc;
+}
+
+/**
+ * framer_init - Registers the driver
+ *
+ * Registers the framer platform driver; the sysfs entries for the
+ * block are created later, at probe time
+ *
+ * Return: 0 on success, a negative errno returned by
+ * platform_driver_register() on failure
+ */
+static int __init framer_init(void)
+{
+ int ret;
+
+ pr_debug("XROE framer driver init\n");
+
+ ret = platform_driver_register(&framer_driver);
+
+ return ret;
+}
+
+/**
+ * framer_exit - Unregisters the driver
+ *
+ * Removes the sysfs entries created by the driver and
+ * unregisters the framer platform driver
+ */
+static void __exit framer_exit(void)
+{
+ xroe_sysfs_exit();
+ platform_driver_unregister(&framer_driver);
+ pr_info("XROE Framer exit\n");
+}
+
+module_init(framer_init);
+module_exit(framer_exit);
+
+static const struct of_device_id framer_of_match[] = {
+ { .compatible = "xlnx,roe-framer-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, framer_of_match);
+
+static struct platform_driver framer_driver = {
+ .driver = {
+ /*
+ * TODO: .name shouldn't be necessary, though removing
+ * it results in kernel panic. To investigate further
+ */
+ .name = DRIVER_NAME,
+ .of_match_table = framer_of_match,
+ },
+ .probe = framer_probe,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("framer - Xilinx Radio over Ethernet Framer driver");
diff --git a/drivers/staging/xroeframer/xroe_framer.h b/drivers/staging/xroeframer/xroe_framer.h
new file mode 100644
index 000000000000..03b8bb39095c
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include "roe_framer_ctrl.h"
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/stat.h> /* S_IRUSR, S_IWUSR */
+
+/* TODO: Remove hardcoded value of number of Ethernet ports and read the value
+ * from the device tree.
+ */
+#define MAX_NUM_ETH_PORTS 0x4
+/* TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the following 3 structures are shared among the multiple
+ * source files
+ */
+extern struct framer_local *lp;
+extern struct kobject *root_xroe_kobj;
+extern struct kobject *kobj_framer;
+extern struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+struct framer_local {
+ int irq;
+ unsigned long mem_start;
+ unsigned long mem_end;
+ void __iomem *base_addr;
+};
+
+int xroe_sysfs_init(void);
+int xroe_sysfs_ipv4_init(void);
+int xroe_sysfs_ipv6_init(void);
+int xroe_sysfs_udp_init(void);
+int xroe_sysfs_stats_init(void);
+void xroe_sysfs_exit(void);
+void xroe_sysfs_ipv4_exit(void);
+void xroe_sysfs_ipv6_exit(void);
+void xroe_sysfs_udp_exit(void);
+void xroe_sysfs_stats_exit(void);
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset);
+int utils_check_address_offset(u32 offset, size_t device_size);
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj);
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj);
diff --git a/drivers/staging/xroetrafficgen/Kconfig b/drivers/staging/xroetrafficgen/Kconfig
new file mode 100644
index 000000000000..d2ead1483408
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Xilinx Radio over Ethernet Traffic Generator driver
+#
+
+config XROE_TRAFFIC_GEN
+ tristate "Xilinx Radio over Ethernet Traffic Generator driver"
+ help
+ The Traffic Generator is used in testing of other RoE IP blocks
+ (currently the XRoE Framer) and simulates a radio antenna interface.
+ It generates rolling ramp data for eCPRI antenna paths.
+ Each path is tagged with the antenna number. The sink locks to this
+ ramp data, then checks that each subsequent value is as expected.
diff --git a/drivers/staging/xroetrafficgen/Makefile b/drivers/staging/xroetrafficgen/Makefile
new file mode 100644
index 000000000000..e180a9bbc589
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Xilinx Radio over Ethernet Traffic Generator driver
+#
+obj-$(CONFIG_XROE_TRAFFIC_GEN) := xroe_traffic_gen.o
+
+xroe_traffic_gen-objs := xroe-traffic-gen.o \
+ xroe-traffic-gen-sysfs.o
diff --git a/drivers/staging/xroetrafficgen/README b/drivers/staging/xroetrafficgen/README
new file mode 100644
index 000000000000..1828426af847
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/README
@@ -0,0 +1,19 @@
+Xilinx Radio over Ethernet Traffic Generator driver
+===================================================
+
+About the RoE Framer Traffic Generator
+
+The Traffic Generator is used in testing of other RoE IP blocks (currently
+the XRoE Framer) and simulates a radio antenna interface. It generates rolling
+ramp data for eCPRI antenna paths. Each path is tagged with the antenna number.
+The sink locks to this ramp data, then checks that each subsequent value is as
+expected.
+
+
+About the Linux Driver
+
+The RoE Traffic Generator Linux Driver provides sysfs access to control a
+simulated radio antenna interface.
+The driver is bound to the hardware through its Device Tree node (see
+"dt-binding.txt" for more information). When the driver is loaded, the general
+controls (such as sink lock, enable, loopback etc.) are exposed in the
+device's sysfs directory.
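+
+As a minimal illustration (not part of the driver), a userspace program can
+read these controls like any other sysfs attribute. The device directory name
+below is an assumption and depends on the actual Device Tree node:
+
+    #include <stdio.h>
+
+    /* Hypothetical sysfs path; adjust to the real DT node name/address */
+    #define TRAFGEN_DIR "/sys/bus/platform/devices/a0000000.xroe-traffic-gen"
+
+    int main(void)
+    {
+        unsigned int id = 0;
+        FILE *f = fopen(TRAFGEN_DIR "/radio_id", "r");
+
+        if (!f) {
+            perror("fopen");
+            return 1;
+        }
+        if (fscanf(f, "%u", &id) == 1)
+            printf("traffic gen ID: %u\n", id);
+        fclose(f);
+        return 0;
+    }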
diff --git a/drivers/staging/xroetrafficgen/roe_radio_ctrl.h b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
new file mode 100644
index 000000000000..e093386f3e94
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_RADIO_CFG_BASE_ADDR 0x0
+#define ROE_RADIO_SOURCE_BASE_ADDR 0x1000
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_cfg
+ * with prefix radio_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define RADIO_ID_ADDR 0x0
+#define RADIO_ID_MASK 0xffffffff
+#define RADIO_ID_OFFSET 0x0
+#define RADIO_ID_WIDTH 0x20
+#define RADIO_ID_DEFAULT 0x120001
+
+/* Type = rw */
+#define RADIO_TIMEOUT_ENABLE_ADDR 0x4
+#define RADIO_TIMEOUT_ENABLE_MASK 0x1
+#define RADIO_TIMEOUT_ENABLE_OFFSET 0x0
+#define RADIO_TIMEOUT_ENABLE_WIDTH 0x1
+#define RADIO_TIMEOUT_ENABLE_DEFAULT 0x0
+
+/* Type = ro */
+#define RADIO_TIMEOUT_STATUS_ADDR 0x8
+#define RADIO_TIMEOUT_STATUS_MASK 0x1
+#define RADIO_TIMEOUT_STATUS_OFFSET 0x0
+#define RADIO_TIMEOUT_STATUS_WIDTH 0x1
+#define RADIO_TIMEOUT_STATUS_DEFAULT 0x1
+
+/* Type = rw */
+#define RADIO_TIMEOUT_VALUE_ADDR 0xc
+#define RADIO_TIMEOUT_VALUE_MASK 0xfff
+#define RADIO_TIMEOUT_VALUE_OFFSET 0x0
+#define RADIO_TIMEOUT_VALUE_WIDTH 0xc
+#define RADIO_TIMEOUT_VALUE_DEFAULT 0x80
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDMODE2_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDMODE2_MASK 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_OFFSET 0x0
+#define RADIO_GPIO_CDC_LEDMODE2_WIDTH 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDGPIO_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDGPIO_MASK 0x30
+#define RADIO_GPIO_CDC_LEDGPIO_OFFSET 0x4
+#define RADIO_GPIO_CDC_LEDGPIO_WIDTH 0x2
+#define RADIO_GPIO_CDC_LEDGPIO_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_GPIO_CDC_DIPSTATUS_ADDR 0x14
+#define RADIO_GPIO_CDC_DIPSTATUS_MASK 0xff
+#define RADIO_GPIO_CDC_DIPSTATUS_OFFSET 0x0
+#define RADIO_GPIO_CDC_DIPSTATUS_WIDTH 0x8
+#define RADIO_GPIO_CDC_DIPSTATUS_DEFAULT 0x0
+
+/* Type = wPlsH */
+#define RADIO_SW_TRIGGER_ADDR 0x20
+#define RADIO_SW_TRIGGER_MASK 0x1
+#define RADIO_SW_TRIGGER_OFFSET 0x0
+#define RADIO_SW_TRIGGER_WIDTH 0x1
+#define RADIO_SW_TRIGGER_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_ENABLE_ADDR 0x24
+#define RADIO_CDC_ENABLE_MASK 0x1
+#define RADIO_CDC_ENABLE_OFFSET 0x0
+#define RADIO_CDC_ENABLE_WIDTH 0x1
+#define RADIO_CDC_ENABLE_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_ADDR 0x24
+#define RADIO_CDC_ERROR_MASK 0x2
+#define RADIO_CDC_ERROR_OFFSET 0x1
+#define RADIO_CDC_ERROR_WIDTH 0x1
+#define RADIO_CDC_ERROR_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_ADDR 0x24
+#define RADIO_CDC_STATUS_MASK 0x4
+#define RADIO_CDC_STATUS_OFFSET 0x2
+#define RADIO_CDC_STATUS_WIDTH 0x1
+#define RADIO_CDC_STATUS_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_LOOPBACK_ADDR 0x28
+#define RADIO_CDC_LOOPBACK_MASK 0x1
+#define RADIO_CDC_LOOPBACK_OFFSET 0x0
+#define RADIO_CDC_LOOPBACK_WIDTH 0x1
+#define RADIO_CDC_LOOPBACK_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_SINK_ENABLE_ADDR 0x2c
+#define RADIO_SINK_ENABLE_MASK 0x1
+#define RADIO_SINK_ENABLE_OFFSET 0x0
+#define RADIO_SINK_ENABLE_WIDTH 0x1
+#define RADIO_SINK_ENABLE_DEFAULT 0x1
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_31_0_ADDR 0x30
+#define RADIO_CDC_ERROR_31_0_MASK 0xffffffff
+#define RADIO_CDC_ERROR_31_0_OFFSET 0x0
+#define RADIO_CDC_ERROR_31_0_WIDTH 0x20
+#define RADIO_CDC_ERROR_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_63_32_ADDR 0x34
+#define RADIO_CDC_ERROR_63_32_MASK 0xffffffff
+#define RADIO_CDC_ERROR_63_32_OFFSET 0x0
+#define RADIO_CDC_ERROR_63_32_WIDTH 0x20
+#define RADIO_CDC_ERROR_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_95_64_ADDR 0x38
+#define RADIO_CDC_ERROR_95_64_MASK 0xffffffff
+#define RADIO_CDC_ERROR_95_64_OFFSET 0x0
+#define RADIO_CDC_ERROR_95_64_WIDTH 0x20
+#define RADIO_CDC_ERROR_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_127_96_ADDR 0x3c
+#define RADIO_CDC_ERROR_127_96_MASK 0xffffffff
+#define RADIO_CDC_ERROR_127_96_OFFSET 0x0
+#define RADIO_CDC_ERROR_127_96_WIDTH 0x20
+#define RADIO_CDC_ERROR_127_96_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_31_0_ADDR 0x40
+#define RADIO_CDC_STATUS_31_0_MASK 0xffffffff
+#define RADIO_CDC_STATUS_31_0_OFFSET 0x0
+#define RADIO_CDC_STATUS_31_0_WIDTH 0x20
+#define RADIO_CDC_STATUS_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_63_32_ADDR 0x44
+#define RADIO_CDC_STATUS_63_32_MASK 0xffffffff
+#define RADIO_CDC_STATUS_63_32_OFFSET 0x0
+#define RADIO_CDC_STATUS_63_32_WIDTH 0x20
+#define RADIO_CDC_STATUS_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_95_64_ADDR 0x48
+#define RADIO_CDC_STATUS_95_64_MASK 0xffffffff
+#define RADIO_CDC_STATUS_95_64_OFFSET 0x0
+#define RADIO_CDC_STATUS_95_64_WIDTH 0x20
+#define RADIO_CDC_STATUS_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_127_96_ADDR 0x4c
+#define RADIO_CDC_STATUS_127_96_MASK 0xffffffff
+#define RADIO_CDC_STATUS_127_96_OFFSET 0x0
+#define RADIO_CDC_STATUS_127_96_WIDTH 0x20
+#define RADIO_CDC_STATUS_127_96_DEFAULT 0x0
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_source
+ * with prefix fram_ @ address 0x1000
+ *-----------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_PACKET_DATA_SIZE_ADDR 0x1000
+#define FRAM_PACKET_DATA_SIZE_MASK 0x7f
+#define FRAM_PACKET_DATA_SIZE_OFFSET 0x0
+#define FRAM_PACKET_DATA_SIZE_WIDTH 0x7
+#define FRAM_PACKET_DATA_SIZE_DEFAULT 0x0
+
+/* Type = rwpdef */
+#define FRAM_PAUSE_DATA_SIZE_ADDR 0x1004
+#define FRAM_PAUSE_DATA_SIZE_MASK 0x7f
+#define FRAM_PAUSE_DATA_SIZE_OFFSET 0x0
+#define FRAM_PAUSE_DATA_SIZE_WIDTH 0x7
+#define FRAM_PAUSE_DATA_SIZE_DEFAULT 0x0
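+
+/*
+ * Illustrative only (not part of the generated register map): a field is
+ * read by masking the register word and shifting the field down, e.g.
+ *
+ *	reg = ioread32(base + RADIO_CDC_ENABLE_ADDR);
+ *	enable = (reg & RADIO_CDC_ENABLE_MASK) >> RADIO_CDC_ENABLE_OFFSET;
+ */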
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
new file mode 100644
index 000000000000..c9b05866fd78
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "roe_radio_ctrl.h"
+#include "xroe-traffic-gen.h"
+
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be written
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ */
+static void utils_sysfs_store_wrapper(struct device *dev, u32 address,
+ u32 offset, u32 mask, u32 value)
+{
+ void __iomem *working_address;
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0;
+ u32 buffer = 0;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ read_register_value = ioread32(working_address);
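+ /*
+ * Read-modify-write: clear the field selected by the mask, then OR in
+ * the new value shifted up to the field's bit position.
+ */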
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the "show" function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be read
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be read
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+static u32 utils_sysfs_show_wrapper(struct device *dev, u32 address, u32 offset,
+ u32 mask)
+{
+ void __iomem *working_address;
+ u32 buffer;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ buffer = ioread32(working_address);
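+ /* Isolate the field with the mask and shift it down to bit 0 */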
+ return (buffer & mask) >> offset;
+}
+
+/**
+ * radio_id_show - Returns the block's ID number
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's ID (0x120001, i.e. 1179649, by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_id;
+
+ radio_id = utils_sysfs_show_wrapper(dev, RADIO_ID_ADDR,
+ RADIO_ID_OFFSET,
+ RADIO_ID_MASK);
+ return sprintf(buf, "%d\n", radio_id);
+}
+static DEVICE_ATTR_RO(radio_id);
+
+/**
+ * timeout_enable_show - Returns the traffic gen's timeout enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's timeout enable status to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_enable;
+
+ timeout_enable = utils_sysfs_show_wrapper(dev,
+ RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK);
+ return sprintf(buf, "%d\n", timeout_enable);
+}
+
+/**
+ * timeout_enable_store - Writes to the traffic gen's timeout enable
+ * status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's timeout enable
+ * status to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 enable = 0;
+
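+ /*
+ * The entry accepts the literal strings "true" and "false";
+ * any other input leaves enable at 0
+ */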
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buf, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ enable = 1;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ enable = 0;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK, enable);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_enable);
+
+/**
+ * timeout_status_show - Returns the timeout status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout status
+ *
+ * Returns the traffic gen's timeout status (0x1 by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout;
+
+ timeout = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_STATUS_ADDR,
+ RADIO_TIMEOUT_STATUS_OFFSET,
+ RADIO_TIMEOUT_STATUS_MASK);
+ return sprintf(buf, "%d\n", timeout);
+}
+static DEVICE_ATTR_RO(timeout_status);
+
+/**
+ * timeout_value_show - Returns the traffic gen's timeout value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ *
+ * Reads and writes the traffic gen's timeout value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_value;
+
+ timeout_value = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK);
+ return sprintf(buf, "%d\n", timeout_value);
+}
+
+/**
+ * timeout_value_store - Writes to the traffic gen's timeout value
+ * register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's timeout value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 timeout_value;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &timeout_value);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK, timeout_value);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_value);
+
+/**
+ * ledmode_show - Returns the current LED mode
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED mode
+ *
+ * Reads and writes the traffic gen's LED mode value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledmode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledmode;
+
+ ledmode = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK);
+ return sprintf(buf, "%d\n", ledmode);
+}
+
+/**
+ * ledmode_store - Writes to the current LED mode register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED mode value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's LED mode value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledmode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledmode;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledmode);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK, ledmode);
+ return count;
+}
+static DEVICE_ATTR_RW(ledmode);
+
+/**
+ * ledgpio_show - Returns the current LED gpio
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED GPIO value
+ *
+ * Reads and writes the traffic gen's LED gpio value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledgpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledgpio;
+
+ ledgpio = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK);
+ return sprintf(buf, "%d\n", ledgpio);
+}
+
+/**
+ * ledgpio_store - Writes to the current LED gpio register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the LED GPIO value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's LED GPIO value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledgpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledgpio;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledgpio);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK, ledgpio);
+ return count;
+}
+static DEVICE_ATTR_RW(ledgpio);
+
+/**
+ * dip_status_show - Returns the current DIP switch value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the DIP switch value
+ *
+ * Reads and writes the GPIO DIP switch value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t dip_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 dip_status;
+
+ dip_status = utils_sysfs_show_wrapper(dev,
+ RADIO_GPIO_CDC_DIPSTATUS_ADDR,
+ RADIO_GPIO_CDC_DIPSTATUS_OFFSET,
+ RADIO_GPIO_CDC_DIPSTATUS_MASK);
+ return sprintf(buf, "0x%08x\n", dip_status);
+}
+static DEVICE_ATTR_RO(dip_status);
+
+/**
+ * sw_trigger_show - Returns the current SW trigger status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the SW trigger status
+ *
+ * Reads and writes the traffic gen's SW trigger status value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t sw_trigger_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 sw_trigger;
+
+ sw_trigger = utils_sysfs_show_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK);
+ return sprintf(buf, "%d\n", sw_trigger);
+}
+
+/**
+ * sw_trigger_store - Writes to the SW trigger status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the SW trigger value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's SW trigger
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t sw_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sw_trigger;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sw_trigger);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK, sw_trigger);
+ return count;
+}
+static DEVICE_ATTR_RW(sw_trigger);
+
+/**
+ * radio_enable_show - Returns the current radio enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's radio enable value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_enable;
+
+ radio_enable = utils_sysfs_show_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK);
+ return sprintf(buf, "%d\n", radio_enable);
+}
+
+/**
+ * radio_enable_store - Writes to the radio enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the radio enable value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio enable
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK,
+ radio_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_enable);
+
+/**
+ * radio_error_show - Returns the current radio error status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the error status
+ *
+ * Reads and writes the traffic gen's radio error value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_error;
+
+ radio_error = utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_ADDR,
+ RADIO_CDC_ERROR_OFFSET,
+ RADIO_CDC_ERROR_MASK);
+ return sprintf(buf, "%d\n", radio_error);
+}
+static DEVICE_ATTR_RO(radio_error);
+
+/**
+ * radio_status_show - Returns the current radio status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the status
+ *
+ * Reads and writes the traffic gen's radio status value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_status;
+
+ radio_status = utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_ADDR,
+ RADIO_CDC_STATUS_OFFSET,
+ RADIO_CDC_STATUS_MASK);
+ return sprintf(buf, "%d\n", radio_status);
+}
+static DEVICE_ATTR_RO(radio_status);
+
+/**
+ * radio_loopback_show - Returns the current radio loopback status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads and writes the traffic gen's radio loopback value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_loopback_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_loopback;
+
+ radio_loopback = utils_sysfs_show_wrapper(dev,
+ RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK);
+ return sprintf(buf, "%d\n", radio_loopback);
+}
+
+/**
+ * radio_loopback_store - Writes to the radio loopback register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio loopback
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_loopback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_loopback;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_loopback);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK, radio_loopback);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_loopback);
+
+/**
+ * radio_sink_enable_show - Returns the current radio sink enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the sink enable status
+ *
+ * Reads and writes the traffic gen's radio sink enable value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_sink_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 sink_enable;
+
+ sink_enable = utils_sysfs_show_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK);
+ return sprintf(buf, "%d\n", sink_enable);
+}
+
+/**
+ * radio_sink_enable_store - Writes to the radio sink enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the sink enable value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio sink
+ * enable value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_sink_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sink_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sink_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK, sink_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_sink_enable);
+
+/**
+ * antenna_status_show - Returns the status for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the antenna status values
+ *
+ * Returns the traffic gen's status for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status_0_31;
+ u32 status_63_32;
+ u32 status_95_64;
+ u32 status_127_96;
+
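+ /* The 128 per-antenna status bits are spread across four 32-bit registers */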
+ status_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_31_0_ADDR,
+ RADIO_CDC_STATUS_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_31_0_MASK));
+ status_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_63_32_ADDR,
+ RADIO_CDC_STATUS_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_63_32_MASK));
+ status_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_95_64_ADDR,
+ RADIO_CDC_STATUS_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_95_64_MASK));
+ status_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_127_96_ADDR,
+ RADIO_CDC_STATUS_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ status_0_31, status_63_32, status_95_64, status_127_96);
+}
+static DEVICE_ATTR_RO(antenna_status);
+
+/**
+ * antenna_error_show - Returns the error for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the antenna error values
+ *
+ * Returns the traffic gen's error for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 error_0_31;
+ u32 error_63_32;
+ u32 error_95_64;
+ u32 error_127_96;
+
+ error_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_31_0_ADDR,
+ RADIO_CDC_ERROR_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_31_0_MASK));
+ error_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_63_32_ADDR,
+ RADIO_CDC_ERROR_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_63_32_MASK));
+ error_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_95_64_ADDR,
+ RADIO_CDC_ERROR_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_95_64_MASK));
+ error_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_127_96_ADDR,
+ RADIO_CDC_ERROR_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ error_0_31, error_63_32, error_95_64, error_127_96);
+}
+static DEVICE_ATTR_RO(antenna_error);
+
+/**
+ * framer_packet_size_show - Returns the size of the framer's packet
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the framer packet size
+ *
+ * Reads and writes the traffic gen's framer packet size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_packet_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 packet_size;
+
+ packet_size = utils_sysfs_show_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", packet_size);
+}
+
+/**
+ * framer_packet_size_store - Writes to the framer's packet size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the packet size value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer packet
+ * size value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_packet_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 packet_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &packet_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK, packet_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_packet_size);
+
+/**
+ * framer_pause_size_show - Returns the size of the framer's pause
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the framer pause size
+ *
+ * Reads and writes the traffic gen's framer pause size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_pause_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 pause_size;
+
+ pause_size = utils_sysfs_show_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", pause_size);
+}
+
+/**
+ * framer_pause_size_store - Writes to the framer's pause size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the pause size value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer pause
+ * size value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_pause_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 pause_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &pause_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK, pause_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_pause_size);
+
+static struct attribute *xroe_traffic_gen_attrs[] = {
+ &dev_attr_radio_id.attr,
+ &dev_attr_timeout_enable.attr,
+ &dev_attr_timeout_status.attr,
+ &dev_attr_timeout_value.attr,
+ &dev_attr_ledmode.attr,
+ &dev_attr_ledgpio.attr,
+ &dev_attr_dip_status.attr,
+ &dev_attr_sw_trigger.attr,
+ &dev_attr_radio_enable.attr,
+ &dev_attr_radio_error.attr,
+ &dev_attr_radio_status.attr,
+ &dev_attr_radio_loopback.attr,
+ &dev_attr_radio_sink_enable.attr,
+ &dev_attr_antenna_status.attr,
+ &dev_attr_antenna_error.attr,
+ &dev_attr_framer_packet_size.attr,
+ &dev_attr_framer_pause_size.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(xroe_traffic_gen);
+
+/**
+ * xroe_traffic_gen_sysfs_init - Creates the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroetrafficgen sysfs directory and entries
+ */
+int xroe_traffic_gen_sysfs_init(struct device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->kobj, *xroe_traffic_gen_groups);
+ if (ret)
+ dev_err(dev, "sysfs creation failed\n");
+
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_sysfs_exit - Deletes the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Deletes the xroetrafficgen sysfs directory and entries
+ */
+void xroe_traffic_gen_sysfs_exit(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, *xroe_traffic_gen_groups);
+}
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
new file mode 100644
index 000000000000..1ed6e488d38d
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "xroe-traffic-gen.h"
+
+#define DRIVER_NAME "xroe_traffic_gen"
+
+static struct platform_driver xroe_traffic_gen_driver;
+
+/**
+ * xroe_traffic_gen_probe - Probes the device tree to locate the traffic gen
+ * block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the traffic gen block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int xroe_traffic_gen_probe(struct platform_device *pdev)
+{
+ struct xroe_traffic_gen_local *lp;
+ struct resource *r_mem; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ /*
+ * TODO: Use platform_get_resource_byname() instead when the DT entry
+ * of the traffic gen block has been finalised (when it gets out of
+ * the development stage).
+ */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+ return xroe_traffic_gen_sysfs_init(dev);
+}
+
+/**
+ * xroe_traffic_gen_remove - Removes the sysfs entries created by the driver
+ * @pdev: The structure containing the device's details
+ *
+ * Removes the sysfs entries created by the driver
+ *
+ * Return: 0
+ */
+static int xroe_traffic_gen_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ xroe_traffic_gen_sysfs_exit(dev);
+ return 0;
+}
+
+/**
+ * xroe_traffic_gen_init - Registers the driver
+ *
+ * Registers the traffic gen platform driver; the sysfs entries
+ * are created later, at probe time
+ *
+ * Return: 0 on success, a negative errno returned by
+ * platform_driver_register() on failure
+ */
+static int __init xroe_traffic_gen_init(void)
+{
+ int ret;
+
+ pr_info("XROE traffic generator driver init\n");
+ ret = platform_driver_register(&xroe_traffic_gen_driver);
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_exit - Destroys the driver
+ *
+ * Unregisters the traffic gen driver
+ */
+static void __exit xroe_traffic_gen_exit(void)
+{
+ platform_driver_unregister(&xroe_traffic_gen_driver);
+ pr_debug("XROE traffic generator driver exit\n");
+}
+
+static const struct of_device_id xroe_traffic_gen_of_match[] = {
+ { .compatible = "xlnx,roe-traffic-gen-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xroe_traffic_gen_of_match);
+
+static struct platform_driver xroe_traffic_gen_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xroe_traffic_gen_of_match,
+ },
+ .probe = xroe_traffic_gen_probe,
+ .remove = xroe_traffic_gen_remove,
+};
+
+module_init(xroe_traffic_gen_init);
+module_exit(xroe_traffic_gen_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Radio over Ethernet Traffic Generator driver");
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.h b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
new file mode 100644
index 000000000000..55d968d89e10
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+struct xroe_traffic_gen_local {
+ void __iomem *base_addr;
+};
+
+enum { XROE_SIZE_MAX = 15 };
+
+int xroe_traffic_gen_sysfs_init(struct device *dev);
+void xroe_traffic_gen_sysfs_exit(struct device *dev);
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 02629a1f193d..8fedd2de1f68 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -6,12 +6,40 @@
#include <asm/dcc.h>
#include <asm/processor.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/serial_core.h>
+
#include "hvc_console.h"
/* DCC Status Bits */
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+static void dcc_uart_console_putchar(struct uart_port *port, int ch)
+{
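+ /* Spin until the DCC transmit buffer has room for another character */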
+ while (__dcc_getstatus() & DCC_STATUS_TX)
+ cpu_relax();
+
+ __dcc_putchar(ch);
+}
+
+static void dcc_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, dcc_uart_console_putchar);
+}
+
+static int __init dcc_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = dcc_early_write;
+
+ return 0;
+}
+EARLYCON_DECLARE(dcc, dcc_early_console_setup);
+
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
int i;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index c15c0cf059e2..4e5f89428f97 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -104,8 +104,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
port->mapbase += prop;
+ port->mapsize -= prop;
+ }
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 75b1de40690a..d697db575e85 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -577,15 +577,6 @@ config SERIAL_UARTLITE_CONSOLE
console (the system console is the device which receives all kernel
messages and warnings and which allows logins in single user mode).
-config SERIAL_UARTLITE_NR_UARTS
- int "Maximum number of uartlite serial ports"
- depends on SERIAL_UARTLITE
- range 1 256
- default 1
- help
- Set this to the number of uartlites in your system, or the number
- you think you might implement.
-
config SERIAL_SUNCORE
bool
depends on SPARC
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 06e79c11141d..f5a581d8367f 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -27,7 +27,6 @@
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
-#define ULITE_NR_UARTS CONFIG_SERIAL_UARTLITE_NR_UARTS
/* ---------------------------------------------------------------------
* Register definitions
@@ -65,6 +64,7 @@ static struct uart_port *console_port;
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
struct clk *clk;
+ int id;
struct uart_driver *ulite_uart_driver;
};
@@ -117,7 +117,6 @@ static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
pdata->reg_ops->out(val, port->membase + offset);
}
-static struct uart_port ulite_ports[ULITE_NR_UARTS];
/* ---------------------------------------------------------------------
* Core UART driver operations
@@ -535,18 +534,6 @@ static int ulite_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-static struct uart_driver ulite_uart_driver;
-
-static struct console ulite_console = {
- .name = ULITE_NAME,
- .write = ulite_console_write,
- .device = uart_console_device,
- .setup = ulite_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */
- .data = &ulite_uart_driver,
-};
-
static void early_uartlite_putc(struct uart_port *port, int c)
{
/*
@@ -590,18 +577,6 @@ OF_EARLYCON_DECLARE(uartlite_a, "xlnx,xps-uartlite-1.00.a", early_uartlite_setup
#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
-static struct uart_driver ulite_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "uartlite",
- .dev_name = ULITE_NAME,
- .major = ULITE_MAJOR,
- .minor = ULITE_MINOR,
- .nr = ULITE_NR_UARTS,
-#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
- .cons = &ulite_console,
-#endif
-};
-
/* ---------------------------------------------------------------------
* Port assignment functions (mapping devices to uart_port structures)
*/
@@ -622,24 +597,9 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
struct uart_port *port;
int rc;
- /* if id = -1; then scan for a free id and use that */
- if (id < 0) {
- for (id = 0; id < ULITE_NR_UARTS; id++)
- if (ulite_ports[id].mapbase == 0)
- break;
- }
- if (id < 0 || id >= ULITE_NR_UARTS) {
- dev_err(dev, "%s%i too large\n", ULITE_NAME, id);
- return -EINVAL;
- }
-
- if ((ulite_ports[id].mapbase) && (ulite_ports[id].mapbase != base)) {
- dev_err(dev, "cannot assign to %s%i; it is already in use\n",
- ULITE_NAME, id);
- return -EBUSY;
- }
-
- port = &ulite_ports[id];
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
spin_lock_init(&port->lock);
port->fifosize = 16;
@@ -653,7 +613,6 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
port->flags = UPF_BOOT_AUTOCONF;
port->dev = dev;
port->type = PORT_UNKNOWN;
- port->line = id;
port->private_data = pdata;
dev_set_drvdata(dev, port);
@@ -665,12 +624,12 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
* If register_console() don't assign value, then console_port pointer
* is cleanup.
*/
- if (ulite_uart_driver.cons->index == -1)
+ if (!console_port)
console_port = port;
#endif
/* Register the port */
- rc = uart_add_one_port(&ulite_uart_driver, port);
+ rc = uart_add_one_port(pdata->ulite_uart_driver, port);
if (rc) {
dev_err(dev, "uart_add_one_port() failed; err=%i\n", rc);
port->mapbase = 0;
@@ -680,7 +639,8 @@ static int ulite_assign(struct device *dev, int id, u32 base, int irq,
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
/* This is not port which is used for console that's why clean it up */
- if (ulite_uart_driver.cons->index == -1)
+ if (console_port == port &&
+ !(pdata->ulite_uart_driver->cons->flags & CON_ENABLED))
console_port = NULL;
#endif
@@ -782,11 +742,24 @@ static const struct of_device_id ulite_of_match[] = {
MODULE_DEVICE_TABLE(of, ulite_of_match);
#endif /* CONFIG_OF */
-static int ulite_probe(struct platform_device *pdev)
+/*
+ * Maximum number of instances without alias IDs. If an alias targets an
+ * ID inside the "< MAX_UART_INSTANCES" range, that ID can't be reused by
+ * an instance without an alias.
+ */
+#define MAX_UART_INSTANCES 256
+
+/* Stores static aliases list */
+static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES);
+static int alias_bitmap_initialized;
+
+/* Stores actual bitmap of allocated IDs with alias IDs together */
+static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES);
+/* Protect bitmap operations to have unique IDs */
+static DEFINE_MUTEX(bitmap_lock);
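+
+/*
+ * ID allocation in brief: "alias_bitmap" marks IDs reserved by DT
+ * "serialN" aliases; "bitmap" additionally tracks IDs claimed at probe
+ * time, so a dynamically probed port never shadows an aliased one.
+ */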
+
+static int ulite_get_id(struct platform_device *pdev)
{
- struct resource *res;
- struct uartlite_data *pdata;
- int irq, ret;
+ int ret;
int id = pdev->id;
#ifdef CONFIG_OF
const __be32 *prop;
@@ -795,39 +768,158 @@ static int ulite_probe(struct platform_device *pdev)
if (prop)
id = be32_to_cpup(prop);
#endif
- if (id < 0) {
- /* Look for a serialN alias */
- id = of_alias_get_id(pdev->dev.of_node, "serial");
- if (id < 0)
- id = 0;
- }
- if (!ulite_uart_driver.state) {
- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
- ret = uart_register_driver(&ulite_uart_driver);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
+ mutex_lock(&bitmap_lock);
+
+ /* The alias list is stable, so fetch the alias bitmap only once */
+ if (!alias_bitmap_initialized) {
+ ret = of_alias_get_alias_list(of_match_ptr(ulite_of_match), "serial",
+ alias_bitmap, MAX_UART_INSTANCES);
+ if (ret) {
+ mutex_unlock(&bitmap_lock);
return ret;
}
+
+ alias_bitmap_initialized++;
+ }
+
+ /* Make sure alias IDs can't be taken by instances without an alias */
+ bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES);
+
+ dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n",
+ MAX_UART_INSTANCES, bitmap);
+
+ /* Look for a serialN alias */
+ if (id < 0)
+ id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (id < 0) {
+ dev_warn(&pdev->dev,
+ "No serial alias passed. Using the first free id\n");
+
+ /*
+ * Start with id 0 and check whether a serial0 alias points to a
+ * device compatible with this driver. If such an alias exists,
+ * try the next free position.
+ */
+ id = 0;
+
+ for (;;) {
+ dev_dbg(&pdev->dev, "Checking id %d\n", id);
+ id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id);
+
+ /* No free instance left */
+ if (id == MAX_UART_INSTANCES) {
+ dev_err(&pdev->dev, "No free ID\n");
+ mutex_unlock(&bitmap_lock);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "The empty id is %d\n", id);
+ /* Try to claim this ID */
+ if (!test_and_set_bit(id, bitmap)) {
+ /* The bit was free and is now ours; stop searching */
+ dev_dbg(&pdev->dev,
+ "Selected ID %d allocation passed\n",
+ id);
+ break;
+ }
+ dev_dbg(&pdev->dev,
+ "Selected ID %d allocation failed\n", id);
+ /* Claiming the bit failed; try the next one */
+ id++;
+ }
}
+ mutex_unlock(&bitmap_lock);
+
+ return id;
+}
+
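The allocation policy above, reserve every serialN alias up front and then hand instances without an alias the first unreserved ID, can be modelled in plain C. A minimal sketch under those assumptions (fixed alias set, no locking; the names here are illustrative, not driver code):

#include <stdio.h>

#define MAX_IDS 256

static unsigned char alias_map[MAX_IDS]; /* IDs claimed by aliases */
static unsigned char id_map[MAX_IDS];    /* all allocated IDs */

/* Stand-in for ulite_get_id() when no alias matches the device */
static int get_free_id(void)
{
    int id;

    /* Alias IDs are always treated as taken */
    for (id = 0; id < MAX_IDS; id++)
        id_map[id] |= alias_map[id];

    for (id = 0; id < MAX_IDS; id++) {
        if (!id_map[id]) {
            id_map[id] = 1; /* claim it */
            return id;
        }
    }
    return -1; /* no free ID */
}

int main(void)
{
    alias_map[0] = alias_map[1] = 1; /* serial0 and serial1 aliases */
    printf("first free id: %d\n", get_free_id()); /* prints 2 */
    return 0;
}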
+static int ulite_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct uartlite_data *pdata;
+ int irq, ret;
+ struct uart_driver *ulite_uart_driver;
+ char *driver_name;
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ struct console *ulite_console;
+#endif
+
pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
+ ulite_uart_driver = devm_kzalloc(&pdev->dev,
+ sizeof(*ulite_uart_driver),
+ GFP_KERNEL);
+ if (!ulite_uart_driver)
+ return -ENOMEM;
+
+ pdata->id = ulite_get_id(pdev);
+ if (pdata->id < 0)
+ return pdata->id;
+
+ /* Each instance needs a unique driver name */
+ driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d",
+ ULITE_NAME, pdata->id);
+ if (!driver_name) {
+ ret = -ENOMEM;
+ goto err_out_id;
+ }
+
+ ulite_uart_driver->owner = THIS_MODULE;
+ ulite_uart_driver->driver_name = driver_name;
+ ulite_uart_driver->dev_name = ULITE_NAME;
+ ulite_uart_driver->major = ULITE_MAJOR;
+ ulite_uart_driver->minor = pdata->id;
+ ulite_uart_driver->nr = 1;
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ ulite_console = devm_kzalloc(&pdev->dev, sizeof(*ulite_console),
+ GFP_KERNEL);
+ if (!ulite_console) {
+ ret = -ENOMEM;
+ goto err_out_id;
+ }
+
+ strncpy(ulite_console->name, ULITE_NAME,
+ sizeof(ulite_console->name));
+ ulite_console->index = pdata->id;
+ ulite_console->write = ulite_console_write;
+ ulite_console->device = uart_console_device;
+ ulite_console->setup = ulite_console_setup;
+ ulite_console->flags = CON_PRINTBUFFER;
+ ulite_uart_driver->cons = ulite_console;
+ ulite_console->data = ulite_uart_driver;
+#endif
+
+ dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(ulite_uart_driver);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register driver\n");
+ goto err_out_id;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
+ if (!res) {
+ ret = -ENODEV;
+ goto err_out_unregister_driver;
+ }
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ if (irq <= 0) {
+ ret = -ENXIO;
+ goto err_out_unregister_driver;
+ }
pdata->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(pdata->clk)) {
- if (PTR_ERR(pdata->clk) != -ENOENT)
- return PTR_ERR(pdata->clk);
+ if (PTR_ERR(pdata->clk) != -ENOENT) {
+ ret = PTR_ERR(pdata->clk);
+ goto err_out_unregister_driver;
+ }
/*
* Clock framework support is optional, continue on
@@ -836,11 +928,10 @@ static int ulite_probe(struct platform_device *pdev)
pdata->clk = NULL;
}
- pdata->ulite_uart_driver = &ulite_uart_driver;
ret = clk_prepare_enable(pdata->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
- return ret;
+ goto err_out_unregister_driver;
}
pm_runtime_use_autosuspend(&pdev->dev);
@@ -848,11 +939,27 @@ static int ulite_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
+ ulite_uart_driver->tty_driver->name_base = pdata->id;
+ pdata->ulite_uart_driver = ulite_uart_driver;
+ ret = ulite_assign(&pdev->dev, pdata->id, res->start, irq, pdata);
+ if (ret < 0)
+ goto err_out_clk_disable;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+err_out_clk_disable:
+ clk_disable_unprepare(pdata->clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+err_out_unregister_driver:
+ uart_unregister_driver(ulite_uart_driver);
+err_out_id:
+ mutex_lock(&bitmap_lock);
+ clear_bit(pdata->id, bitmap);
+ mutex_unlock(&bitmap_lock);
return ret;
}
@@ -864,6 +971,16 @@ static int ulite_remove(struct platform_device *pdev)
clk_unprepare(pdata->clk);
rc = ulite_release(&pdev->dev);
+ mutex_lock(&bitmap_lock);
+ clear_bit(pdata->id, bitmap);
+ mutex_unlock(&bitmap_lock);
+
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+ if (console_port == port)
+ console_port = NULL;
+#endif
+
+ uart_unregister_driver(pdata->ulite_uart_driver);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -889,7 +1006,6 @@ static struct platform_driver ulite_platform_driver = {
static int __init ulite_init(void)
{
-
pr_debug("uartlite: calling platform_driver_register()\n");
return platform_driver_register(&ulite_platform_driver);
}
@@ -897,8 +1013,6 @@ static int __init ulite_init(void)
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- if (ulite_uart_driver.state)
- uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 4265f7a696be..ae0edad59cdc 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
/* Rx Timeout */
static int rx_timeout = 10;
-module_param(rx_timeout, uint, S_IRUGO);
+module_param(rx_timeout, uint, 0444);
MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
/* Register offsets for the UART. */
@@ -199,7 +199,7 @@ struct cdns_platform_data {
u32 quirks;
};
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
- clk_rate_change_nb);
+ clk_rate_change_nb)
/**
* cdns_uart_handle_rx - Handle the received bytes along with Rx errors.
@@ -312,7 +312,8 @@ static void cdns_uart_handle_tx(void *dev_id)
} else {
numbytes = port->fifosize;
while (numbytes && !uart_circ_empty(&port->state->xmit) &&
- !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+ !(readl(port->membase + CDNS_UART_SR) &
+ CDNS_UART_SR_TXFULL)) {
/*
* Get the data from the UART circular buffer
* and write it to the cdns_uart's TX_FIFO
@@ -365,6 +366,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
cdns_uart_handle_tx(dev_id);
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
@@ -1073,8 +1076,6 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
cpu_relax();
spin_unlock_irqrestore(&port->lock, flags);
-
- return;
}
#endif
@@ -1539,6 +1540,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
+ port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
+ CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 202ee81cfc2b..bae8e2904c56 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -2,6 +2,7 @@
menuconfig UIO
tristate "Userspace I/O drivers"
depends on MMU
+ select DMA_SHARED_BUFFER
help
Enable this to allow the userspace driver core code to be
built. This code allows userspace programs easy access to
@@ -165,4 +166,27 @@ config UIO_HV_GENERIC
to network and storage devices from userspace.
If you compile this as a module, it will be called uio_hv_generic.
+
+config UIO_XILINX_APM
+ tristate "Xilinx AXI Performance Monitor driver"
+ depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
+ help
+ This driver is developed for the AXI Performance Monitor IP, designed
+ to monitor AXI4 traffic for performance analysis of the AXI bus in
+ the system. The driver maps HW registers and parameters to userspace.
+
+ To compile this driver as a module, choose M here; the module
+ will be called uio_xilinx_apm.
+
+config UIO_XILINX_AI_ENGINE
+ tristate "Xilinx AI Engine driver"
+ select IRQ_SIM
+ select UIO_DMEM_GENIRQ
+ select UIO_PDRV_GENIRQ
+ help
+ The driver for the Xilinx AI Engine, built on uio_dmem_genirq.
+ The userspace library uses it to interact with the AI Engine
+ hardware and to allocate memory.
+ Say 'y' only for platforms with the AI Engine IP.
+
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index c285dd2a4539..7af888a0228a 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_UIO) += uio.o
+obj-$(CONFIG_UIO) += uio.o uio_dmabuf.o
obj-$(CONFIG_UIO_CIF) += uio_cif.o
obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o
obj-$(CONFIG_UIO_DMEM_GENIRQ) += uio_dmem_genirq.o
@@ -9,5 +9,7 @@ obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
+obj-$(CONFIG_UIO_XILINX_APM) += uio_xilinx_apm.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
+obj-$(CONFIG_UIO_XILINX_AI_ENGINE) += uio_xilinx_ai_engine.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a57698985f9c..61b28d8be7f4 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -24,6 +24,12 @@
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
#define UIO_MAX_DEVICES (1U << MINORBITS)
@@ -454,6 +460,8 @@ static irqreturn_t uio_interrupt(int irq, void *dev_id)
struct uio_listener {
struct uio_device *dev;
s32 event_count;
+ struct list_head dbufs;
+ struct mutex dbufs_lock; /* protect @dbufs */
};
static int uio_open(struct inode *inode, struct file *filep)
@@ -500,6 +508,9 @@ static int uio_open(struct inode *inode, struct file *filep)
if (ret)
goto err_infoopen;
+ INIT_LIST_HEAD(&listener->dbufs);
+ mutex_init(&listener->dbufs_lock);
+
return 0;
err_infoopen:
@@ -529,6 +540,10 @@ static int uio_release(struct inode *inode, struct file *filep)
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
+ ret = uio_dmabuf_cleanup(idev, &listener->dbufs, &listener->dbufs_lock);
+ if (ret)
+ dev_err(&idev->dev, "failed to clean up the dma bufs\n");
+
mutex_lock(&idev->info_lock);
if (idev->info && idev->info->release)
ret = idev->info->release(idev->info, inode);
@@ -652,6 +667,33 @@ out:
return retval ? retval : sizeof(s32);
}
+static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+ long ret;
+
+ if (!idev->info)
+ return -EIO;
+
+ switch (cmd) {
+ case UIO_IOC_MAP_DMABUF:
+ ret = uio_dmabuf_map(idev, &listener->dbufs,
+ &listener->dbufs_lock, (void __user *)arg);
+ break;
+ case UIO_IOC_UNMAP_DMABUF:
+ ret = uio_dmabuf_unmap(idev, &listener->dbufs,
+ &listener->dbufs_lock,
+ (void __user *)arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
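For reference, a userspace caller of the two new ioctls might look like the sketch below. The field names (dbuf_fd, dir, dma_addr, size) and the UIO_IOC_* and UIO_DMABUF_DIR_* constants are taken from the driver code in this series; the uapi include path, the /dev/uio0 node and the dma-buf fd source are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/uio/uio.h> /* uapi header added by this series (path assumed) */

int main(void)
{
    struct uio_dmabuf_args args = { 0 };
    int dbuf_fd = -1; /* obtain from a dma-buf exporter, e.g. DRM */
    int uio_fd = open("/dev/uio0", O_RDWR);

    if (uio_fd < 0)
        return 1;

    args.dbuf_fd = dbuf_fd;
    args.dir = UIO_DMABUF_DIR_BIDIR;
    if (ioctl(uio_fd, UIO_IOC_MAP_DMABUF, &args) < 0) {
        perror("UIO_IOC_MAP_DMABUF");
        return 1;
    }

    /* On success the driver filled in the device view of the buffer */
    printf("dma_addr=%#llx size=%llu\n",
           (unsigned long long)args.dma_addr,
           (unsigned long long)args.size);

    ioctl(uio_fd, UIO_IOC_UNMAP_DMABUF, &args);
    close(uio_fd);
    return 0;
}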
static int uio_find_mem_index(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
@@ -821,6 +863,7 @@ static const struct file_operations uio_fops = {
.write = uio_write,
.mmap = uio_mmap,
.poll = uio_poll,
+ .unlocked_ioctl = uio_ioctl,
.fasync = uio_fasync,
.llseek = noop_llseek,
};
diff --git a/drivers/uio/uio_dmabuf.c b/drivers/uio/uio_dmabuf.c
new file mode 100644
index 000000000000..b18f1469f6c8
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <linux/slab.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
+
+struct uio_dmabuf_mem {
+ int dbuf_fd;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ struct list_head list;
+};
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args)
+{
+ struct uio_dmabuf_args args;
+ struct uio_dmabuf_mem *dbuf_mem;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args))) {
+ ret = -EFAULT;
+ dev_err(dev->dev.parent, "failed to copy from user\n");
+ goto err;
+ }
+
+ dbuf = dma_buf_get(args.dbuf_fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(dev->dev.parent, "failed to get dmabuf\n");
+ return PTR_ERR(dbuf);
+ }
+
+ dbuf_attach = dma_buf_attach(dbuf, dev->dev.parent);
+ if (IS_ERR(dbuf_attach)) {
+ dev_err(dev->dev.parent, "failed to attach dmabuf\n");
+ ret = PTR_ERR(dbuf_attach);
+ goto err_put;
+ }
+
+ switch (args.dir) {
+ case UIO_DMABUF_DIR_BIDIR:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+ case UIO_DMABUF_DIR_TO_DEV:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UIO_DMABUF_DIR_FROM_DEV:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ /* Invalid direction requested by userspace; reject it */
+ dev_err(dev->dev.parent, "invalid direction\n");
+ ret = -EINVAL;
+ goto err_detach;
+ }
+
+ sgt = dma_buf_map_attachment(dbuf_attach, dir);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev.parent, "failed to get dmabuf scatterlist\n");
+ ret = PTR_ERR(sgt);
+ goto err_detach;
+ }
+
+ /* Accept only DMA-contiguous buffers */
+ if (sgt->nents != 1) {
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (!sg_dma_len(s))
+ continue;
+
+ if (sg_dma_address(s) != next_addr) {
+ dev_err(dev->dev.parent,
+ "dmabuf not contiguous\n");
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
+ }
+
+ dbuf_mem = kzalloc(sizeof(*dbuf_mem), GFP_KERNEL);
+ if (!dbuf_mem) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ dbuf_mem->dbuf_fd = args.dbuf_fd;
+ dbuf_mem->dbuf = dbuf;
+ dbuf_mem->dbuf_attach = dbuf_attach;
+ dbuf_mem->sgt = sgt;
+ dbuf_mem->dir = dir;
+ args.dma_addr = sg_dma_address(sgt->sgl);
+ args.size = dbuf->size;
+
+ if (copy_to_user(user_args, &args, sizeof(args))) {
+ ret = -EFAULT;
+ dev_err(dev->dev.parent, "failed to copy to user\n");
+ goto err_free;
+ }
+
+ mutex_lock(dbufs_lock);
+ list_add(&dbuf_mem->list, dbufs);
+ mutex_unlock(dbufs_lock);
+
+ return 0;
+
+err_free:
+ kfree(dbuf_mem);
+err_unmap:
+ dma_buf_unmap_attachment(dbuf_attach, sgt, dir);
+err_detach:
+ dma_buf_detach(dbuf, dbuf_attach);
+err_put:
+ dma_buf_put(dbuf);
+err:
+ return ret;
+}
+
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args)
+
+{
+ struct uio_dmabuf_args args;
+ struct uio_dmabuf_mem *dbuf_mem = NULL, *iter;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ mutex_lock(dbufs_lock);
+ list_for_each_entry(iter, dbufs, list) {
+ if (iter->dbuf_fd == args.dbuf_fd) {
+ dbuf_mem = iter;
+ break;
+ }
+ }
+
+ /* If nothing matched, the loop cursor is not a valid entry */
+ if (!dbuf_mem) {
+ dev_err(dev->dev.parent, "failed to find the dmabuf (%d)\n",
+ args.dbuf_fd);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ list_del(&dbuf_mem->list);
+ mutex_unlock(dbufs_lock);
+
+ dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+ dbuf_mem->dir);
+ dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+ dma_buf_put(dbuf_mem->dbuf);
+ kfree(dbuf_mem);
+
+ memset(&args, 0x0, sizeof(args));
+
+ if (copy_to_user(user_args, &args, sizeof(args))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(dbufs_lock);
+err:
+ return ret;
+}
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock)
+{
+ struct uio_dmabuf_mem *dbuf_mem, *next;
+
+ mutex_lock(dbufs_lock);
+ list_for_each_entry_safe(dbuf_mem, next, dbufs, list) {
+ list_del(&dbuf_mem->list);
+ dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+ dbuf_mem->dir);
+ dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+ dma_buf_put(dbuf_mem->dbuf);
+ kfree(dbuf_mem);
+ }
+ mutex_unlock(dbufs_lock);
+
+ return 0;
+}
diff --git a/drivers/uio/uio_dmabuf.h b/drivers/uio/uio_dmabuf.h
new file mode 100644
index 000000000000..30200306d53a
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#ifndef _UIO_DMABUF_H_
+#define _UIO_DMABUF_H_
+
+struct uio_device;
+struct list_head;
+struct mutex;
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args);
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args);
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock);
+
+#endif
diff --git a/drivers/uio/uio_xilinx_ai_engine.c b/drivers/uio/uio_xilinx_ai_engine.c
new file mode 100644
index 000000000000..174efa805b52
--- /dev/null
+++ b/drivers/uio/uio_xilinx_ai_engine.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx UIO driver for AI Engine
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irq_sim.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/uio_dmem_genirq.h>
+#include <linux/platform_device.h>
+#include <linux/uio_driver.h>
+
+#define DRIVER_NAME "xilinx-aiengine"
+#define XILINX_AI_ENGINE_MAX_IRQ 4
+
+static uint xilinx_ai_engine_mem_cnt = 1;
+module_param_named(mem_cnt, xilinx_ai_engine_mem_cnt, uint, 0444);
+MODULE_PARM_DESC(mem_cnt, "Dynamic memory allocation count (default: 1)");
+
+static uint xilinx_ai_engine_mem_size = 32 * 1024 * 1024;
+module_param_named(mem_size, xilinx_ai_engine_mem_size, uint, 0444);
+MODULE_PARM_DESC(mem_size,
+ "Dynamic memory allocation size in bytes (default: 32 MB)");
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t xilinx_ai_engine_debugfs_write(struct file *f,
+ const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct irq_sim *irq_sim = file_inode(f)->i_private;
+
+ irq_sim_fire(irq_sim, 1);
+
+ return size;
+}
+
+static const struct file_operations debugfs_ops = {
+ .owner = THIS_MODULE,
+ .write = xilinx_ai_engine_debugfs_write,
+};
+
+/**
+ * xilinx_ai_engine_debugfs_init - Initialize the debugfs for irq sim
+ * @pdev: platform device to simulate irq for
+ * @irq_sim: simulated irq
+ *
+ * Initialize the debugfs for irq simulation. This allows the user to
+ * generate the simulated interrupt.
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+static int xilinx_ai_engine_debugfs_init(struct platform_device *pdev,
+ struct irq_sim *irq_sim)
+{
+ int ret;
+ struct dentry *debugfs_dir, *debugfs_file;
+
+ debugfs_dir = debugfs_create_dir("xilinx-ai-engine", NULL);
+ if (!debugfs_dir)
+ return -ENODEV;
+
+ debugfs_file = debugfs_create_file(dev_name(&pdev->dev), 0644,
+ debugfs_dir, irq_sim, &debugfs_ops);
+ if (!debugfs_file) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ debugfs_remove_recursive(debugfs_dir);
+ return ret;
+}
+
+/**
+ * xilinx_ai_engine_simulate_irq - Simulate the irq
+ * @pdev: platform device to simulate irq for
+ *
+ * Simulate the irq so it can be generated from userspace. This is only
+ * for debugging purposes.
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+static int xilinx_ai_engine_simulate_irq(struct platform_device *pdev)
+{
+ struct irq_sim *irq_sim;
+ int irq, ret;
+
+ irq_sim = devm_kzalloc(&pdev->dev, sizeof(*irq_sim), GFP_KERNEL);
+ if (!irq_sim)
+ return -ENOMEM;
+
+ /*
+ * Sometimes, the returned base value is 0, so allocate 2 irqs, and
+ * always use the 2nd one.
+ */
+ irq = devm_irq_sim_init(&pdev->dev, irq_sim, 2);
+ if (irq < 0)
+ return irq;
+
+ ret = xilinx_ai_engine_debugfs_init(pdev, irq_sim);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create debugfs for sim irq");
+ return ret;
+ }
+
+ return irq_sim_irqnum(irq_sim, 1);
+}
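With CONFIG_DEBUG_FS, any write to the per-device debugfs file fires the simulated interrupt. A minimal userspace trigger, with the device name below being a placeholder for dev_name(&pdev->dev):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/sys/kernel/debug/xilinx-ai-engine/aie0", O_WRONLY);

    if (fd < 0)
        return 1;
    if (write(fd, "1", 1) < 0) { /* any write fires irq_sim_fire() */
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}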
+
+#else
+
+static int xilinx_ai_engine_simulate_irq(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+#endif
+
+static int xilinx_ai_engine_mem_index(struct uio_info *info,
+ struct vm_area_struct *vma)
+{
+ if (vma->vm_pgoff < MAX_UIO_MAPS) {
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -1;
+ return (int)vma->vm_pgoff;
+ }
+ return -1;
+}
+
+static const struct vm_operations_struct xilinx_ai_engine_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+static int xilinx_ai_engine_mmap(struct uio_info *info,
+ struct vm_area_struct *vma)
+{
+ int mi = xilinx_ai_engine_mem_index(info, vma);
+ struct uio_mem *mem;
+
+ if (mi < 0)
+ return -EINVAL;
+ mem = info->mem + mi;
+
+ if (mem->addr & ~PAGE_MASK)
+ return -ENODEV;
+ if (vma->vm_end - vma->vm_start > mem->size)
+ return -EINVAL;
+
+ vma->vm_ops = &xilinx_ai_engine_vm_ops;
+ /*
+ * Map the dynamic memory regions as write-combined. Only the first
+ * region is the mmio region, which is mapped as noncached.
+ */
+ if (mi < 1)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /*
+ * We cannot use the vm_iomap_memory() helper here,
+ * because vma->vm_pgoff is the map index we looked
+ * up above in xilinx_ai_engine_mem_index(), rather than an
+ * actual page offset into the mmap.
+ *
+ * So we just do the physical mmap without a page
+ * offset.
+ */
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
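Since the probe code further down sets each uio_mem offset to its map index shifted by PAGE_SHIFT, userspace selects a region purely by mmap offset: offset 0 maps the MMIO region (noncached) and one page maps the first dynamic region (write-combined). A sketch, assuming /dev/uio0 and page-sized mappings:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long psz = sysconf(_SC_PAGESIZE);
    int fd = open("/dev/uio0", O_RDWR); /* node name is an assumption */
    void *mmio, *dmem;

    if (fd < 0)
        return 1;
    /* mmap offset = map index << PAGE_SHIFT selects the region */
    mmio = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    dmem = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, psz);
    if (mmio == MAP_FAILED || dmem == MAP_FAILED)
        return 1;
    printf("mmio=%p dmem=%p\n", mmio, dmem);
    return 0;
}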
+
+static int xilinx_ai_engine_probe(struct platform_device *pdev)
+{
+ struct platform_device *uio;
+ struct uio_dmem_genirq_pdata *pdata;
+ unsigned int i;
+ static const char * const interrupt_names[] = { "interrupt0",
+ "interrupt1",
+ "interrupt2",
+ "interrupt3" };
+ int ret;
+
+ uio = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!uio)
+ return -ENOMEM;
+ uio->driver_override = kstrdup("uio_dmem_genirq", GFP_KERNEL);
+ if (!uio->driver_override) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ uio->dev.parent = &pdev->dev;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ pdata->num_dynamic_regions = xilinx_ai_engine_mem_cnt;
+ pdata->dynamic_region_sizes = &xilinx_ai_engine_mem_size;
+ pdata->uioinfo.name = DRIVER_NAME;
+ pdata->uioinfo.version = "devicetree";
+ pdata->uioinfo.mmap = xilinx_ai_engine_mmap;
+ /* Set each memory's offset to its map index in pages */
+ for (i = 0; i < MAX_UIO_MAPS; i++)
+ pdata->uioinfo.mem[i].offs = i << PAGE_SHIFT;
+
+ /* TODO: Only one interrupt is supported out of 4 */
+ for (i = 0; i < XILINX_AI_ENGINE_MAX_IRQ; i++) {
+ ret = platform_get_irq_byname(pdev, interrupt_names[i]);
+ if (ret >= 0) {
+ dev_info(&pdev->dev, "%s is used", interrupt_names[i]);
+ break;
+ }
+ }
+
+ /* Interrupt is optional */
+ if (ret < 0) {
+ ret = xilinx_ai_engine_simulate_irq(pdev);
+ if (ret < 0)
+ ret = UIO_IRQ_CUSTOM;
+ }
+ pdata->uioinfo.irq = ret;
+
+ ret = platform_device_add_data(uio, pdata, sizeof(*pdata));
+ if (ret)
+ goto err_out;
+
+ /* Mirror the parent device resource to uio device */
+ ret = platform_device_add_resources(uio, pdev->resource,
+ pdev->num_resources);
+ if (ret)
+ goto err_out;
+
+ /* Configure the dma for uio device using the parent of_node */
+ uio->dev.bus = &platform_bus_type;
+ ret = of_dma_configure(&uio->dev, pdev->dev.of_node, true);
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add(uio);
+ if (ret)
+ goto err_out;
+ platform_set_drvdata(uio, pdata);
+ platform_set_drvdata(pdev, uio);
+
+ dev_info(&pdev->dev, "Xilinx AI Engine UIO driver probed");
+ return 0;
+
+err_out:
+ platform_device_put(uio);
+ dev_err(&pdev->dev,
+ "failed to probe Xilinx AI Engine UIO driver");
+ return ret;
+}
+
+static int xilinx_ai_engine_remove(struct platform_device *pdev)
+{
+ struct platform_device *uio = platform_get_drvdata(pdev);
+
+ platform_device_unregister(uio);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_ai_engine_of_match[] = {
+ { .compatible = "xlnx,ai_engine", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_ai_engine_of_match);
+
+static struct platform_driver xilinx_ai_engine_driver = {
+ .probe = xilinx_ai_engine_probe,
+ .remove = xilinx_ai_engine_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xilinx_ai_engine_of_match,
+ },
+};
+
+module_platform_driver(xilinx_ai_engine_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/uio_xilinx_apm.c b/drivers/uio/uio_xilinx_apm.c
new file mode 100644
index 000000000000..90d70a5a9425
--- /dev/null
+++ b/drivers/uio/uio_xilinx_apm.c
@@ -0,0 +1,369 @@
+/*
+ * Xilinx AXI Performance Monitor
+ *
+ * Copyright (C) 2013 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver is developed for AXI Performance Monitor IP,
+ * designed to monitor AXI4 traffic for performance analysis
+ * of the AXI bus in the system. The driver maps HW registers and
+ * parameters to userspace. Userspace need not clear the IP's
+ * interrupt since the driver clears it.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uio_driver.h>
+
+#define XAPM_IS_OFFSET 0x0038 /* Interrupt Status Register */
+#define DRV_NAME "xilinxapm_uio"
+#define DRV_VERSION "1.0"
+#define UIO_DUMMY_MEMSIZE 4096
+#define XAPM_MODE_ADVANCED 1
+#define XAPM_MODE_PROFILE 2
+#define XAPM_MODE_TRACE 3
+
+/**
+ * struct xapm_param - HW parameters structure
+ * @mode: Mode in which APM is working
+ * @maxslots: Maximum number of Slots in APM
+ * @eventcnt: Event counting enabled in APM
+ * @eventlog: Event logging enabled in APM
+ * @sampledcnt: Sampled metric counters enabled in APM
+ * @numcounters: Number of counters in APM
+ * @metricwidth: Metric Counter width (32/64)
+ * @sampledwidth: Sampled metric counter width
+ * @globalcntwidth: Global Clock counter width
+ * @scalefactor: Scaling factor
+ * @isr: Interrupts info shared to userspace
+ * @is_32bit_filter: Flags for 32bit filter
+ * @clk: Clock handle
+ */
+struct xapm_param {
+ u32 mode;
+ u32 maxslots;
+ u32 eventcnt;
+ u32 eventlog;
+ u32 sampledcnt;
+ u32 numcounters;
+ u32 metricwidth;
+ u32 sampledwidth;
+ u32 globalcntwidth;
+ u32 scalefactor;
+ u32 isr;
+ bool is_32bit_filter;
+ struct clk *clk;
+};
+
+/**
+ * struct xapm_dev - Global driver structure
+ * @info: uio_info structure
+ * @param: xapm_param structure
+ * @regs: IOmapped base address
+ */
+struct xapm_dev {
+ struct uio_info info;
+ struct xapm_param param;
+ void __iomem *regs;
+};
+
+/**
+ * xapm_handler - Interrupt handler for APM
+ * @irq: IRQ number
+ * @info: Pointer to uio_info structure
+ *
+ * Return: Always returns IRQ_HANDLED
+ */
+static irqreturn_t xapm_handler(int irq, struct uio_info *info)
+{
+ struct xapm_dev *xapm = (struct xapm_dev *)info->priv;
+ void *ptr;
+
+ ptr = (unsigned long *)xapm->info.mem[1].addr;
+ /* Clear the interrupt and copy the ISR value to userspace */
+ xapm->param.isr = readl(xapm->regs + XAPM_IS_OFFSET);
+ writel(xapm->param.isr, xapm->regs + XAPM_IS_OFFSET);
+ memcpy(ptr, &xapm->param, sizeof(struct xapm_param));
+
+ return IRQ_HANDLED;
+}
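Userspace sees the xapm_param copy through the driver's second map. Assuming the standard UIO convention that map N is selected with an mmap offset of N pages, and mirroring only the leading fields of struct xapm_param, a reader could look like this sketch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Mirrors the first fields of struct xapm_param above */
struct xapm_param_user {
    uint32_t mode;
    uint32_t maxslots;
    uint32_t eventcnt;
    uint32_t eventlog;
};

int main(void)
{
    long psz = sysconf(_SC_PAGESIZE);
    int fd = open("/dev/uio0", O_RDWR); /* node name is an assumption */
    struct xapm_param_user *p;

    if (fd < 0)
        return 1;
    /* map index 1, i.e. an offset of one page, is the parameter copy */
    p = mmap(NULL, psz, PROT_READ, MAP_SHARED, fd, psz);
    if (p == MAP_FAILED)
        return 1;
    printf("mode=%u maxslots=%u\n", p->mode, p->maxslots);
    return 0;
}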
+
+/**
+ * xapm_getprop - Retrieves dts properties to param structure
+ * @pdev: Pointer to platform device
+ * @param: Pointer to param structure
+ *
+ * Return: '0' on success, a negative error value on failure
+ */
+static int xapm_getprop(struct platform_device *pdev, struct xapm_param *param)
+{
+ u32 mode = 0;
+ int ret;
+ struct device_node *node;
+
+ node = pdev->dev.of_node;
+
+ /* Retrieve required dts properties and fill param structure */
+ ret = of_property_read_u32(node, "xlnx,enable-profile", &mode);
+ if (ret < 0)
+ dev_info(&pdev->dev, "no property xlnx,enable-profile\n");
+ else if (mode)
+ param->mode = XAPM_MODE_PROFILE;
+
+ ret = of_property_read_u32(node, "xlnx,enable-trace", &mode);
+ if (ret < 0)
+ dev_info(&pdev->dev, "no property xlnx,enable-trace\n");
+ else if (mode)
+ param->mode = XAPM_MODE_TRACE;
+
+ ret = of_property_read_u32(node, "xlnx,num-monitor-slots",
+ &param->maxslots);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,num-monitor-slots");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,enable-event-count",
+ &param->eventcnt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,enable-event-count");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,enable-event-log",
+ &param->eventlog);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,enable-event-log");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,have-sampled-metric-cnt",
+ &param->sampledcnt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,have-sampled-metric-cnt");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-of-counters",
+ &param->numcounters);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,num-of-counters");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metric-count-width",
+ &param->metricwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metric-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metrics-sample-count-width",
+ &param->sampledwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metrics-sample-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,global-count-width",
+ &param->globalcntwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,global-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metric-count-scale",
+ &param->scalefactor);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metric-count-scale");
+ return ret;
+ }
+
+ param->is_32bit_filter = of_property_read_bool(node,
+ "xlnx,id-filter-32bit");
+
+ return 0;
+}
+
+/**
+ * xapm_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success, a negative error value on failure
+ */
+static int xapm_probe(struct platform_device *pdev)
+{
+ struct xapm_dev *xapm;
+ struct resource *res;
+ int irq;
+ int ret;
+ void *ptr;
+
+ xapm = devm_kzalloc(&pdev->dev, (sizeof(struct xapm_dev)), GFP_KERNEL);
+ if (!xapm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xapm->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xapm->regs)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xapm->regs);
+ }
+
+ xapm->param.clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xapm->param.clk)) {
+ if (PTR_ERR(xapm->param.clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "axi clock error\n");
+ return PTR_ERR(xapm->param.clk);
+ }
+
+ ret = clk_prepare_enable(xapm->param.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* Initialize mode as Advanced so that if the dts specifies no
+ * mode, the default is Advanced
+ */
+ xapm->param.mode = XAPM_MODE_ADVANCED;
+ ret = xapm_getprop(pdev, &xapm->param);
+ if (ret < 0)
+ goto err_clk_dis;
+
+ xapm->info.mem[0].name = "xilinx_apm";
+ xapm->info.mem[0].addr = res->start;
+ xapm->info.mem[0].size = resource_size(res);
+ xapm->info.mem[0].memtype = UIO_MEM_PHYS;
+
+ xapm->info.mem[1].addr = (unsigned long)devm_kzalloc(&pdev->dev,
+ UIO_DUMMY_MEMSIZE,
+ GFP_KERNEL);
+ if (!xapm->info.mem[1].addr) {
+ ret = -ENOMEM;
+ goto err_clk_dis;
+ }
+ ptr = (unsigned long *)xapm->info.mem[1].addr;
+ xapm->info.mem[1].size = UIO_DUMMY_MEMSIZE;
+ xapm->info.mem[1].memtype = UIO_MEM_LOGICAL;
+
+ xapm->info.name = "axi-pmon";
+ xapm->info.version = DRV_VERSION;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get irq\n");
+ ret = irq;
+ goto err_clk_dis;
+ }
+
+ xapm->info.irq = irq;
+ xapm->info.handler = xapm_handler;
+ xapm->info.priv = xapm;
+ xapm->info.irq_flags = IRQF_SHARED;
+
+ memcpy(ptr, &xapm->param, sizeof(struct xapm_param));
+
+ ret = uio_register_device(&pdev->dev, &xapm->info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register to UIO\n");
+ goto err_clk_dis;
+ }
+
+ platform_set_drvdata(pdev, xapm);
+
+ dev_info(&pdev->dev, "Probed Xilinx APM\n");
+
+ return 0;
+
+err_clk_dis:
+ clk_disable_unprepare(xapm->param.clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return ret;
+}
+
+/**
+ * xapm_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always returns '0'
+ */
+static int xapm_remove(struct platform_device *pdev)
+{
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+
+ uio_unregister_device(&xapm->info);
+ clk_disable_unprepare(xapm->param.clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused xapm_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(xapm->param.clk);
+ return 0;
+}
+
+static int __maybe_unused xapm_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(xapm->param.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops xapm_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xapm_runtime_suspend, xapm_runtime_resume)
+ SET_RUNTIME_PM_OPS(xapm_runtime_suspend,
+ xapm_runtime_resume, NULL)
+};
+
+static const struct of_device_id xapm_of_match[] = {
+ { .compatible = "xlnx,axi-perf-monitor", },
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, xapm_of_match);
+
+static struct platform_driver xapm_driver = {
+ .driver = {
+ .name = "xilinx-axipmon",
+ .of_match_table = xapm_of_match,
+ .pm = &xapm_dev_pm_ops,
+ },
+ .probe = xapm_probe,
+ .remove = xapm_remove,
+};
+
+module_platform_driver(xapm_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx AXI Performance Monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index c044fba463e4..83238293e5be 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -30,6 +30,7 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
static struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_PHY_VBUS_CONTROL,
};
static const struct of_device_id ci_hdrc_usb2_of_match[] = {
@@ -58,6 +59,10 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
if (match && match->data) {
/* struct copy */
*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+ ci_pdata->usb_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy",
+ 0);
+ if (IS_ERR(ci_pdata->usb_phy))
+ return PTR_ERR(ci_pdata->usb_phy);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 48e4a5ca1835..b49edda341ea 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -57,6 +57,14 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
priv->enabled = enable;
}
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus) {
+ if (enable)
+ ci->usb_phy->set_vbus(ci->usb_phy, 1);
+ else
+ ci->usb_phy->set_vbus(ci->usb_phy, 0);
+ }
+
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
/*
* Marvell 28nm HSIC PHY requires forcing the port to HS mode.
@@ -65,6 +73,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
hw_port_test_set(ci, 5);
hw_port_test_set(ci, 0);
}
+
return 0;
};
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 6ed4b00dba96..ec02ea0ab20d 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -471,6 +471,11 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
return;
}
}
+
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus)
+ ci->usb_phy->set_vbus(ci->usb_phy, 1);
+
/* Disable data pulse irq */
hw_write_otgsc(ci, OTGSC_DPIE, 0);
@@ -480,6 +485,10 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
if (ci->platdata->reg_vbus)
regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus)
+ ci->usb_phy->set_vbus(ci->usb_phy, 0);
+
fsm->a_bus_drop = 1;
fsm->a_bus_req = 0;
}
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 864cb18c609a..b15233a6ba9a 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -23,6 +23,7 @@ config USB_DWC3_ULPI
choice
bool "DWC3 Mode Selection"
default USB_DWC3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC3_OTG if (USB && USB_GADGET && USB_OTG && USB_OTG_FSM)
default USB_DWC3_HOST if (USB && !USB_GADGET)
default USB_DWC3_GADGET if (!USB && USB_GADGET)
@@ -48,6 +49,13 @@ config USB_DWC3_DUAL_ROLE
This is the default mode of working of DWC3 controller where
both host and gadget features are enabled.
+config USB_DWC3_OTG
+ bool "Dual Role mode + OTG"
+ depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3) && USB_OTG && USB_OTG_FSM && PM)
+ help
+ This mode of the DWC3 controller enables both host and gadget
+ features together with OTG support.
+
endchoice
comment "Platform Glue Driver Support"
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ae86da0dc5bd..258bc4bfca87 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -10,12 +10,16 @@ ifneq ($(CONFIG_TRACING),)
dwc3-y += trace.o
endif
-ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE) $(CONFIG_USB_DWC3_OTG)),)
dwc3-y += host.o
endif
-ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
- dwc3-y += gadget.o ep0.o
+ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE) $(CONFIG_USB_DWC3_OTG)),)
+ dwc3-y += gadget.o ep0.o gadget_hibernation.o
+endif
+
+ifneq ($(CONFIG_USB_DWC3_OTG),)
+ dwc3-y += otg.o
endif
ifneq ($(CONFIG_USB_DWC3_DUAL_ROLE),)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c91596efa3e8..ab319aaddb6e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
@@ -245,6 +246,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
* XHCI driver will reset the host block. If dwc3 was configured for
* host-only mode, then we can return early.
*/
+ if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->is_hibernated)
+ return 0;
+
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
@@ -289,7 +293,7 @@ static const struct clk_bulk_data dwc3_core_clks[] = {
*/
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
- u32 reg;
+ u32 reg, gfladj;
u32 dft;
if (dwc->revision < DWC3_REVISION_250A)
@@ -298,13 +302,27 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
if (dwc->fladj == 0)
return;
+ /* Save the initial DWC3_GFLADJ register value */
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ gfladj = reg;
+
+ if (dwc->refclk_fladj) {
+ if ((reg & DWC3_GFLADJ_REFCLK_FLADJ) !=
+ (dwc->fladj & DWC3_GFLADJ_REFCLK_FLADJ)) {
+ reg &= ~DWC3_GFLADJ_REFCLK_FLADJ;
+ reg |= (dwc->fladj & DWC3_GFLADJ_REFCLK_FLADJ);
+ }
+ }
+
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
if (dft != dwc->fladj) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
- dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
+
+ /* Update DWC3_GFLADJ if there is any change from initial value */
+ if (reg != gfladj)
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
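The function now performs a single read-modify-write that touches only the fields that change. A userspace model of the merge, using the GFLADJ masks from core.h (both operands are masked here for clarity; the driver itself compares dft against the full fladj value):

#include <stdint.h>
#include <stdio.h>

#define GFLADJ_30MHZ_MASK      0x3fu
#define GFLADJ_30MHZ_SDBND_SEL (1u << 7)
#define GFLADJ_REFCLK_FLADJ    (0x3fffu << 8)

/* Models dwc3_frame_length_adjustment() above */
static uint32_t gfladj_merge(uint32_t reg, uint32_t fladj, int refclk_fladj)
{
    if (refclk_fladj &&
        (reg & GFLADJ_REFCLK_FLADJ) != (fladj & GFLADJ_REFCLK_FLADJ)) {
        reg &= ~GFLADJ_REFCLK_FLADJ;
        reg |= fladj & GFLADJ_REFCLK_FLADJ;
    }
    if ((reg & GFLADJ_30MHZ_MASK) != (fladj & GFLADJ_30MHZ_MASK)) {
        reg &= ~GFLADJ_30MHZ_MASK;
        reg |= GFLADJ_30MHZ_SDBND_SEL | (fladj & GFLADJ_30MHZ_MASK);
    }
    return reg; /* written back only if it differs from the saved value */
}

int main(void)
{
    /* 0x2a in the 30MHz field, new fladj carrying both fields */
    printf("0x%08x\n", gfladj_merge(0x0000002a, 0x00003f20, 1));
    return 0;
}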
/**
@@ -353,7 +371,7 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
* dwc3_free_event_buffers - frees all allocated event buffers
* @dwc: Pointer to our controller context structure
*/
-static void dwc3_free_event_buffers(struct dwc3 *dwc)
+void dwc3_free_event_buffers(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
@@ -370,7 +388,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
* Returns 0 on success otherwise negative errno. In the error case, dwc
* may contain some buffers allocated but not all which were requested.
*/
-static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
+int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
struct dwc3_event_buffer *evt;
@@ -394,6 +412,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
evt = dwc->ev_buf;
evt->lpos = 0;
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
@@ -424,26 +445,46 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
{
+ u32 size;
+
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
if (!dwc->has_hibernation)
return 0;
if (!dwc->nr_scratch)
return 0;
- dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
- DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
+ /* Allocate only if scratchbuf is NULL */
+ if (dwc->scratchbuf)
+ return 0;
+
+ size = dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE;
+
+ dwc->scratchbuf = kzalloc(size, GFP_KERNEL);
+
if (!dwc->scratchbuf)
return -ENOMEM;
+ dwc->scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dwc->sysdev, dwc->scratch_addr)) {
+ dev_err(dwc->sysdev, "failed to map scratch buffer\n");
+ return -EFAULT;
+ }
+
return 0;
}
static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
{
- dma_addr_t scratch_addr;
u32 param;
int ret;
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
if (!dwc->has_hibernation)
return 0;
@@ -451,28 +492,17 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
/* should never fall here */
- if (!WARN_ON(dwc->scratchbuf))
+ if (WARN_ON(!dwc->scratchbuf))
return 0;
- scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
- dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
- dev_err(dwc->sysdev, "failed to map scratch buffer\n");
- ret = -EFAULT;
- goto err0;
- }
-
- dwc->scratch_addr = scratch_addr;
-
- param = lower_32_bits(scratch_addr);
+ param = lower_32_bits(dwc->scratch_addr);
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
if (ret < 0)
goto err1;
- param = upper_32_bits(scratch_addr);
+ param = upper_32_bits(dwc->scratch_addr);
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
@@ -485,7 +515,6 @@ err1:
dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
-err0:
return ret;
}
@@ -498,7 +527,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
return;
/* should never fall here */
- if (!WARN_ON(dwc->scratchbuf))
+ if (WARN_ON(!dwc->scratchbuf))
return;
dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
@@ -528,6 +557,45 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
}
+static int dwc3_config_soc_bus(struct dwc3 *dwc)
+{
+ int ret;
+
+ /*
+ * Check if CCI is enabled for USB. Returns true
+ * if the node has property 'dma-coherent'. Otherwise
+ * returns false.
+ */
+ if (of_dma_is_coherent(dwc->dev->of_node)) {
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
+ reg |= DWC3_GSBUSCFG0_DATRDREQINFO |
+ DWC3_GSBUSCFG0_DESRDREQINFO |
+ DWC3_GSBUSCFG0_DATWRREQINFO |
+ DWC3_GSBUSCFG0_DESWRREQINFO;
+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
+ }
+
+ /*
+ * This routes the usb dma traffic to go through CCI path instead
+ * of reaching DDR directly. This traffic routing is needed to
+ * make SMMU and CCI work with USB dma.
+ */
+ if (of_dma_is_coherent(dwc->dev->of_node) || dwc->dev->iommu_group) {
+ ret = dwc3_enable_hw_coherency(dwc->dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Send struct dwc3 to dwc3-of-simple for configuring VBUS
+ * during suspend/resume
+ */
+ dwc3_set_simple_data(dwc);
+
+ return 0;
+}
+
static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
int intf;
@@ -674,8 +742,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
static void dwc3_core_exit(struct dwc3 *dwc)
{
- dwc3_event_buffers_cleanup(dwc);
-
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
@@ -743,8 +809,15 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
reg &= ~DWC3_GCTL_DSBLCLKGTNG;
break;
case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
- /* enable hibernation here */
- dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+ if (!device_property_read_bool(dwc->dev,
+ "snps,enable-hibernation")) {
+ dev_dbg(dwc->dev, "Hibernation not enabled\n");
+ } else {
+ /* enable hibernation here */
+ dwc->nr_scratch =
+ DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+ dwc->has_hibernation = 1;
+ }
/*
* REVISIT Enabling this bit so that host-mode hibernation
@@ -890,7 +963,7 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*
* Returns 0 on success otherwise negative errno.
*/
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
{
u32 reg;
int ret;
@@ -933,15 +1006,30 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
+ if (!dwc->scratchbuf) {
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret) {
+ dev_err(dwc->dev,
+ "Not enough memory for scratch buffers\n");
+ goto err1;
+ }
+ }
+
ret = dwc3_setup_scratch_buffers(dwc);
- if (ret)
+ if (ret) {
+ dev_err(dwc->dev, "Failed to setup scratch buffers: %d\n", ret);
goto err1;
+ }
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
dwc3_set_incr_burst_type(dwc);
+ ret = dwc3_config_soc_bus(dwc);
+ if (ret)
+ goto err1;
+
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
ret = phy_power_on(dwc->usb2_generic_phy);
@@ -958,6 +1046,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err4;
}
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ break;
+ case USB_DR_MODE_HOST:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ break;
+ default:
+ dev_warn(dwc->dev, "Unsupported mode %d\n", dwc->dr_mode);
+ break;
+ }
+
/*
* ENDXFER polling is available on version 3.10a and later of
* the DWC_usb3 controller. It is NOT available in the
@@ -969,6 +1072,32 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /* When configured in HOST mode, after issuing a U3/L2 exit the
+ * controller fails to send the proper CRC checksum in the CRC5
+ * field. This behaviour generates a Transaction Error, resulting
+ * in reset and re-enumeration of the attached usb device. Enabling
+ * bit 10 of GUCTL1 corrects this problem.
+ */
+ if (dwc->enable_guctl1_resume_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_RESUME_QUIRK;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+ /* The SNPS controller, when configured in HOST mode, maintains an
+ * Inter Packet Delay (IPD) of ~380ns, which works with most
+ * super-speed hubs except VIA-LAB hubs; with that IPD the HOST
+ * controller fails to enumerate FS/LS devices behind VIA-LAB hubs.
+ * Enabling bit 9 of GUCTL1 enables a HW workaround that reduces
+ * the ULPI clock latency by 1 cycle, reducing the IPD to ~360ns
+ * and letting the controller enumerate FS/LS devices connected
+ * behind VIA-LAB hubs.
+ */
+ if (dwc->enable_guctl1_ipd_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_IPD_QUIRK;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
if (dwc->revision >= DWC3_REVISION_250A) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
@@ -1178,6 +1307,11 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dev_err(dev, "failed to initialize dual-role\n");
return ret;
}
+
+#if IS_ENABLED(CONFIG_USB_DWC3_OTG)
+ dwc->current_dr_role = 0;
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+#endif
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -1307,6 +1441,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
&dwc->fladj);
+ dwc->refclk_fladj = device_property_read_bool(dev,
+ "snps,refclk_fladj");
+ dwc->enable_guctl1_resume_quirk = device_property_read_bool(dev,
+ "snps,enable_guctl1_resume_quirk");
+ dwc->enable_guctl1_ipd_quirk = device_property_read_bool(dev,
+ "snps,enable_guctl1_ipd_quirk");
dwc->dis_metastability_quirk = device_property_read_bool(dev,
"snps,dis_metastability_quirk");
@@ -1316,6 +1456,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->hird_threshold = hird_threshold
| (dwc->is_utmi_l1_suspend << 4);
+ /* Check if extra quirks need to be added */
+ dwc3_simple_check_quirks(dwc);
+
dwc->rx_thr_num_pkt_prd = rx_thr_num_pkt_prd;
dwc->rx_max_burst_prd = rx_max_burst_prd;
@@ -1388,9 +1531,8 @@ static int dwc3_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res, dwc_res;
struct dwc3 *dwc;
-
int ret;
-
+ u32 mdwidth;
void __iomem *regs;
dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
@@ -1473,6 +1615,11 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
+ /* Set dma coherent mask to DMA BUS data width */
+ mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
+ dev_dbg(dev, "Enabling %d-bit DMA addresses.\n", mdwidth);
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(mdwidth));
+
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
@@ -1494,10 +1641,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = dwc3_alloc_scratch_buffers(dwc);
- if (ret)
- goto err3;
-
ret = dwc3_core_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -1554,6 +1697,7 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
+ dwc3_event_buffers_cleanup(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
@@ -1655,6 +1799,18 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
}
+ dwc3_event_buffers_cleanup(dwc);
+
+ /* Put the core into D3 state */
+ dwc3_set_usb_core_power(dwc, false);
+
+ /*
+ * To avoid reinit of phy during resume, prevent calling the
+ * dwc3_core_exit() when in D3 state
+ */
+ if (!dwc->is_d3)
+ dwc3_core_exit(dwc);
+
return 0;
}
@@ -1664,6 +1820,13 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
int ret;
u32 reg;
+ /* Bring core to D0 state */
+ dwc3_set_usb_core_power(dwc, true);
+
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
+
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
ret = dwc3_core_init_for_resume(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 679c9f25640c..7e5f6dc4c9bb 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -164,6 +164,9 @@
/* Bit fields */
+/* Global Status Register */
+#define DWC3_GSTS_CUR_MODE (1 << 0)
+
/* Global SoC Bus Configuration INCRx Register 0 */
#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
@@ -196,6 +199,12 @@
#define DWC3_EVENTQ 7
#define DWC3_AUXEVENTQ 8
+/* Global SoC Bus Configuration Register */
+#define DWC3_GSBUSCFG0_DATRDREQINFO (0xf << 28)
+#define DWC3_GSBUSCFG0_DESRDREQINFO (0xf << 24)
+#define DWC3_GSBUSCFG0_DATWRREQINFO (0xf << 20)
+#define DWC3_GSBUSCFG0_DESWRREQINFO (0xf << 16)
+
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
@@ -371,6 +380,11 @@
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+#define DWC3_GFLADJ_REFCLK_FLADJ (0x3fff << 8)
+
+/* Global User Control Register 1 */
+#define DWC3_GUCTL1_RESUME_QUIRK (1 << 10)
+#define DWC3_GUCTL1_IPD_QUIRK (1 << 9)
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
@@ -451,6 +465,7 @@
/* Device Status Register */
#define DWC3_DSTS_DCNRD BIT(29)
+#define DWC3_DSTS_SRE BIT(28)
/* This applies for core versions 1.87a and earlier */
#define DWC3_DSTS_PWRUPREQ BIT(24)
@@ -610,6 +625,9 @@
#define DWC3_OSTS_VBUSVLD BIT(1)
#define DWC3_OSTS_CONIDSTS BIT(0)
+/* Stream timer timeout value in millisecs */
+#define STREAM_TIMEOUT_MS 50
+
/* Structures */
struct dwc3_trb;
@@ -872,6 +890,11 @@ struct dwc3_hwparams {
* @epnum: endpoint number to which this request refers
* @trb: pointer to struct dwc3_trb
* @trb_dma: DMA address of @trb
+ * @stream_timeout_timer: Some endpoints may go out of sync with the host
+ * and enter a deadlock. For example, stream-capable endpoints may
+ * deadlock when the host waits on the gadget to issue ERDY and the
+ * gadget waits for the host to issue a prime transaction. This timer
+ * avoids such deadlocks.
* @num_trbs: number of TRBs used by this request
* @needs_extra_trb: true when request needs one extra TRB (either due to ZLP
* or unaligned OUT)
@@ -887,6 +910,7 @@ struct dwc3_request {
unsigned num_pending_sgs;
unsigned int num_queued_sgs;
+ u8 first_trb_index;
unsigned remaining;
unsigned int status;
@@ -899,6 +923,7 @@ struct dwc3_request {
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
+ struct timer_list stream_timeout_timer;
unsigned num_trbs;
@@ -942,7 +967,9 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
+ * @refclk_fladj: boolean to update GFLADJ_REFCLK_FLADJ field also
* @irq_gadget: peripheral controller's IRQ number
+ * @otg: pointer to the dwc3_otg structure
* @otg_irq: IRQ number for OTG IRQs
* @current_otg_role: current role of operation while using the OTG block
* @desired_otg_role: desired role of operation while using the OTG block
@@ -1009,6 +1036,7 @@ struct dwc3_scratchpad_array {
* not needed for DWC_usb31 version 1.70a-ea06 and below
 * @usb3_lpm_capable: set if hardware supports Link Power Management
* @usb2_lpm_disable: set to disable usb2 lpm
+ * @remote_wakeup: set if host supports Remote Wakeup from Peripheral
* @disable_scramble_quirk: set if we enable the disable scramble quirk
* @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
* @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
@@ -1027,11 +1055,16 @@ struct dwc3_scratchpad_array {
* provide a free-running PHY clock.
* @dis_del_phy_power_chg_quirk: set if we disable delay phy power
* change quirk.
+ * @enable_guctl1_resume_quirk: set if we enable the quirk fixing improper
+ *			CRC generation after resume from suspend.
+ * @enable_guctl1_ipd_quirk: set if we enable the quirk reducing the inter
+ *			packet delay (IPD) timing.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
+ * @is_hibernated: true when dwc3 is hibernated; abort processing events
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
* 1 - -3.5dB de-emphasis
@@ -1040,6 +1073,12 @@ struct dwc3_scratchpad_array {
* @dis_metastability_quirk: set to disable metastability quirk.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
+ * @is_d3: set if the controller is in d3 state
+ * @saved_regs: registers to be saved/restored during hibernation/wakeup events
+ * @irq_wakeup: wakeup IRQ number, triggered when host asks to wakeup from
+ * hibernation
+ * @force_hiber_wake: flag set when the gadget driver is forcefully triggering
+ *		a hibernation wakeup event
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1073,6 +1112,8 @@ struct dwc3 {
struct reset_control *reset;
+ struct dwc3_otg *otg;
+
struct usb_phy *usb2_phy;
struct usb_phy *usb3_phy;
@@ -1095,6 +1136,7 @@ struct dwc3 {
enum usb_phy_interface hsphy_mode;
u32 fladj;
+ bool refclk_fladj;
u32 irq_gadget;
u32 otg_irq;
u32 current_otg_role;
@@ -1202,6 +1244,7 @@ struct dwc3 {
unsigned dis_start_transfer_quirk:1;
unsigned usb3_lpm_capable:1;
unsigned usb2_lpm_disable:1;
+ unsigned remote_wakeup:1;
unsigned disable_scramble_quirk:1;
unsigned u2exit_lfps_quirk:1;
@@ -1217,15 +1260,22 @@ struct dwc3 {
unsigned dis_rxdet_inp3_quirk:1;
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
+ unsigned enable_guctl1_resume_quirk:1;
+ unsigned enable_guctl1_ipd_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
unsigned parkmode_disable_ss_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+ unsigned is_hibernated:1;
unsigned dis_metastability_quirk:1;
u16 imod_interval;
+ bool is_d3;
+ u32 *saved_regs;
+ u32 irq_wakeup;
+ bool force_hiber_wake;
};
#define INCRX_BURST_MODE 0
@@ -1402,12 +1452,31 @@ static inline bool dwc3_is_usb31(struct dwc3 *dwc)
return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
}
-bool dwc3_has_imod(struct dwc3 *dwc);
+#if IS_ENABLED(CONFIG_USB_DWC3_OF_SIMPLE)
+int dwc3_enable_hw_coherency(struct device *dev);
+void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup);
+void dwc3_set_simple_data(struct dwc3 *dwc);
+void dwc3_simple_check_quirks(struct dwc3 *dwc);
+int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on);
+#else
+static inline int dwc3_enable_hw_coherency(struct device *dev)
+{ return 0; }
+static inline void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup)
+{ }
+static inline void dwc3_set_simple_data(struct dwc3 *dwc)
+{ }
+static inline void dwc3_simple_check_quirks(struct dwc3 *dwc)
+{ }
+static inline int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on)
+{ return 0; }
+#endif
+bool dwc3_has_imod(struct dwc3 *dwc);
int dwc3_event_buffers_setup(struct dwc3 *dwc);
void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
-#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)\
+ || IS_ENABLED(CONFIG_USB_DWC3_OTG)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
#else
@@ -1417,7 +1486,8 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
{ }
#endif
-#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)\
+ || IS_ENABLED(CONFIG_USB_DWC3_OTG)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
@@ -1426,6 +1496,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+int dwc3_core_init(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
@@ -1447,11 +1518,19 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
{ return 0; }
#endif
+#if IS_ENABLED(CONFIG_USB_DWC3_OTG) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+void dwc3_otg_init(struct dwc3 *dwc);
+void dwc3_otg_exit(struct dwc3 *dwc);
+#else
+static inline void dwc3_otg_init(struct dwc3 *dwc)
+{ }
+static inline void dwc3_otg_exit(struct dwc3 *dwc)
+{ }
+#endif
+
#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_drd_init(struct dwc3 *dwc);
void dwc3_drd_exit(struct dwc3 *dwc);
-void dwc3_otg_init(struct dwc3 *dwc);
-void dwc3_otg_exit(struct dwc3 *dwc);
void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus);
void dwc3_otg_host_init(struct dwc3 *dwc);
#else
@@ -1459,10 +1538,6 @@ static inline int dwc3_drd_init(struct dwc3 *dwc)
{ return 0; }
static inline void dwc3_drd_exit(struct dwc3 *dwc)
{ }
-static inline void dwc3_otg_init(struct dwc3 *dwc)
-{ }
-static inline void dwc3_otg_exit(struct dwc3 *dwc)
-{ }
static inline void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus)
{ }
static inline void dwc3_otg_host_init(struct dwc3 *dwc)
@@ -1500,4 +1575,8 @@ static inline void dwc3_ulpi_exit(struct dwc3 *dwc)
{ }
#endif
+int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length);
+void dwc3_free_event_buffers(struct dwc3 *dwc);
+
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 1c792710348f..25b753635879 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -625,6 +625,53 @@ static const struct file_operations dwc3_link_state_fops = {
.release = single_release,
};
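+
+/*
+ * "hiber_enable" debugfs file: write "Enable" or "Disable" to toggle the
+ * hibernation feature (the gadget is torn down and re-initialized around
+ * the change); reading back reports the current state.
+ */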
+static int dwc3_hiber_enable_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+
+ seq_printf(s, "%s\n", (dwc->has_hibernation ? "Enabled" : "Disabled"));
+
+ return 0;
+}
+
+static int dwc3_hiber_enable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_hiber_enable_show, inode->i_private);
+}
+
+static ssize_t dwc3_hiber_enable_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+	char			buf[32] = { 0 };
+
+	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ /* Enable hibernation feature */
+ if (!strncmp(buf, "Enable", 6)) {
+ dwc3_gadget_exit(dwc);
+ dwc->has_hibernation = 1;
+ dwc3_gadget_init(dwc);
+ } else if (!strncmp(buf, "Disable", 6)) {
+ dwc3_gadget_exit(dwc);
+ dwc->has_hibernation = 0;
+ dwc3_gadget_init(dwc);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations dwc3_hiber_enable_fops = {
+	.open			= dwc3_hiber_enable_open,
+	.write			= dwc3_hiber_enable_write,
+	.read			= seq_read,
+	.llseek			= seq_lseek,
+	.release		= single_release,
+};
+
struct dwc3_ep_file_map {
const char name[25];
const struct file_operations *const fops;
@@ -935,6 +982,9 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
&dwc3_testmode_fops);
debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc,
&dwc3_link_state_fops);
+ debugfs_create_file("hiber_enable", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_hiber_enable_fops);
+
dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index bdac3e7d7b18..95d4271f57a2 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -21,16 +21,172 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/slab.h>
+
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/of_address.h>
+
+#include "core.h"
+
+/* Xilinx USB 3.0 IP Register */
+#define XLNX_USB_COHERENCY 0x005C
+#define XLNX_USB_COHERENCY_ENABLE 0x1
+
+/* ULPI control registers */
+#define ULPI_OTG_CTRL_SET 0xB
+#define ULPI_OTG_CTRL_CLEAR		0xC
+#define OTG_CTRL_DRVVBUS_OFFSET 5
+
+#define XLNX_USB_CUR_PWR_STATE 0x0000
+#define XLNX_CUR_PWR_STATE_D0 0x00
+#define XLNX_CUR_PWR_STATE_D3 0x0F
+#define XLNX_CUR_PWR_STATE_BITMASK 0x0F
+
+#define XLNX_USB_PME_ENABLE 0x0034
+#define XLNX_PME_ENABLE_SIG_GEN 0x01
+
+#define XLNX_USB_REQ_PWR_STATE 0x003c
+#define XLNX_REQ_PWR_STATE_D0 0x00
+#define XLNX_REQ_PWR_STATE_D3 0x03
+
+/* Retry count and per-poll delay (in microseconds) for power state changes */
+#define DWC3_PWR_STATE_RETRIES 1000
+#define DWC3_PWR_TIMEOUT 100
+
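+/*
+ * dwc->regs points at the dwc3 globals block, so convert the absolute
+ * register offsets used below before accessing them through it.
+ */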
+#define DWC3_OF_ADDRESS(ADDR) ((ADDR) - DWC3_GLOBALS_REGS_START)
struct dwc3_of_simple {
struct device *dev;
struct clk_bulk_data *clks;
int num_clocks;
+ void __iomem *regs;
+ struct dwc3 *dwc;
+ struct phy *phy;
+ bool wakeup_capable;
+ bool dis_u3_susphy_quirk;
+ bool enable_d3_suspend;
+ char soc_rev;
struct reset_control *resets;
bool pulse_resets;
bool need_reset;
};
+int dwc3_enable_hw_coherency(struct device *dev)
+{
+ struct device_node *node = of_get_parent(dev->of_node);
+
+ if (of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ void __iomem *regs;
+ u32 reg;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+ regs = simple->regs;
+
+ reg = readl(regs + XLNX_USB_COHERENCY);
+ reg |= XLNX_USB_COHERENCY_ENABLE;
+ writel(reg, regs + XLNX_USB_COHERENCY);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dwc3_enable_hw_coherency);
+
+void dwc3_set_simple_data(struct dwc3 *dwc)
+{
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (node && of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Set (struct dwc3 *) to simple->dwc for future use */
+ simple->dwc = dwc;
+ }
+}
+EXPORT_SYMBOL(dwc3_set_simple_data);
+
+void dwc3_simple_check_quirks(struct dwc3 *dwc)
+{
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (node && of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Add snps,dis_u3_susphy_quirk */
+ dwc->dis_u3_susphy_quirk = simple->dis_u3_susphy_quirk;
+ }
+}
+EXPORT_SYMBOL(dwc3_simple_check_quirks);
+
+void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup)
+{
+ struct device_node *node = of_node_get(dev->parent->of_node);
+
+ /* check for valid parent node */
+ while (node) {
+ if (!of_device_is_compatible(node, "xlnx,zynqmp-dwc3"))
+ node = of_get_next_parent(node);
+ else
+ break;
+ }
+
+ if (node) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Set wakeup capable as true or false */
+ simple->wakeup_capable = wakeup;
+
+ /* Allow D3 state if wakeup capable only */
+ simple->enable_d3_suspend = wakeup;
+ }
+}
+EXPORT_SYMBOL(dwc3_simple_wakeup_capable);
+
+static int dwc3_simple_set_phydata(struct dwc3_of_simple *simple)
+{
+ struct device *dev = simple->dev;
+ struct device_node *np = dev->of_node;
+ struct phy *phy;
+
+ np = of_get_next_child(np, NULL);
+
+ if (np) {
+ phy = of_phy_get(np, "usb3-phy");
+ if (IS_ERR(phy)) {
+ dev_err(dev, "%s: Can't find usb3-phy\n", __func__);
+ return PTR_ERR(phy);
+ }
+
+ /* Store phy for future usage */
+ simple->phy = phy;
+
+ /* assign USB vendor regs addr to phy platform_data */
+ phy->dev.platform_data = simple->regs;
+
+ phy_put(phy);
+ } else {
+ dev_err(dev, "%s: Can't find child node\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int dwc3_of_simple_probe(struct platform_device *pdev)
{
struct dwc3_of_simple *simple;
@@ -47,6 +203,52 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, simple);
simple->dev = dev;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,zynqmp-dwc3")) {
+
+ char *soc_rev;
+ struct resource *res;
+ void __iomem *regs;
+
+ res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 0);
+
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /* Store the usb control regs into simple for further usage */
+ simple->regs = regs;
+
+ /* read Silicon version using nvmem driver */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+
+ if (PTR_ERR(soc_rev) == -EPROBE_DEFER) {
+ /* Do a deferred probe */
+ return -EPROBE_DEFER;
+
+ } else if (!IS_ERR(soc_rev) &&
+ (*soc_rev < ZYNQMP_SILICON_V4)) {
+			/* Add snps,dis_u3_susphy_quirk
+			 * for SoC revisions less than v4
+			 */
+ simple->dis_u3_susphy_quirk = true;
+ }
+
+		/*
+		 * Dereference and free soc_rev only when nvmem returned a
+		 * valid pointer; doing either on an error pointer would
+		 * crash the kernel
+		 */
+		if (!IS_ERR(soc_rev)) {
+			/* Store soc_rev in simple for future use */
+			simple->soc_rev = *soc_rev;
+			kfree(soc_rev);
+		}
+ }
+
+ /* Set phy data for future use */
+ dwc3_simple_set_phydata(simple);
+
/*
* Some controllers need to toggle the usb3-otg reset before trying to
* initialize the PHY, otherwise the PHY times out.
@@ -132,6 +334,144 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
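+/*
+ * Turn VBUS drive on or off by writing the ULPI OTG Control set/clear
+ * registers through the GUSB2PHYACC ULPI register access window.
+ */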
+static void dwc3_simple_vbus(struct dwc3 *dwc, bool vbus_off)
+{
+ u32 reg, addr;
+ u8 val;
+
+ if (vbus_off)
+ addr = ULPI_OTG_CTRL_CLEAR;
+ else
+ addr = ULPI_OTG_CTRL_SET;
+
+ val = (1 << OTG_CTRL_DRVVBUS_OFFSET);
+
+ reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_GUSB2PHYACC_ADDR(addr);
+ reg |= DWC3_GUSB2PHYACC_WRITE | val;
+
+ addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYACC(0));
+ writel(reg, dwc->regs + addr);
+}
+
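+/**
+ * dwc3_usb2phycfg - Set or clear the USB2 PHY suspend bit
+ * @dwc: pointer to our controller context structure
+ * @suspend: true to set GUSB2PHYCFG.SUSPHY, false to clear it
+ */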
+void dwc3_usb2phycfg(struct dwc3 *dwc, bool suspend)
+{
+ u32 addr, reg;
+
+ addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYCFG(0));
+
+ if (suspend) {
+ reg = readl(dwc->regs + addr);
+ if (!(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ writel(reg, (dwc->regs + addr));
+ }
+ } else {
+ reg = readl(dwc->regs + addr);
+ if ((reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ writel(reg, (dwc->regs + addr));
+ }
+ }
+}
+
+int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on)
+{
+ u32 reg, retries;
+ void __iomem *reg_base;
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ /* this is for Xilinx devices only */
+ if (!of_device_is_compatible(node, "xlnx,zynqmp-dwc3"))
+ return 0;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+ reg_base = simple->regs;
+
+ /* Check if entering into D3 state is allowed during suspend */
+ if ((simple->soc_rev < ZYNQMP_SILICON_V4) || !simple->enable_d3_suspend)
+ return 0;
+
+ if (!simple->phy)
+ return 0;
+
+ if (on) {
+ dev_dbg(dwc->dev, "trying to set power state to D0....\n");
+
+		/* Release USB core reset, which was asserted during D3 entry */
+ xpsgtr_usb_crst_release(simple->phy);
+
+ /* change power state to D0 */
+ writel(XLNX_REQ_PWR_STATE_D0,
+ reg_base + XLNX_USB_REQ_PWR_STATE);
+
+ /* wait till current state is changed to D0 */
+ retries = DWC3_PWR_STATE_RETRIES;
+ do {
+ reg = readl(reg_base + XLNX_USB_CUR_PWR_STATE);
+ if ((reg & XLNX_CUR_PWR_STATE_BITMASK) ==
+ XLNX_CUR_PWR_STATE_D0)
+ break;
+
+ udelay(DWC3_PWR_TIMEOUT);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "Failed to set power state to D0\n");
+ return -EIO;
+ }
+
+ dwc->is_d3 = false;
+
+ /* Clear Suspend PHY bit if dis_u2_susphy_quirk is set */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, false);
+ } else {
+ dev_dbg(dwc->dev, "Trying to set power state to D3...\n");
+
+ /*
+ * Set Suspend PHY bit before entering D3 if
+ * dis_u2_susphy_quirk is set
+ */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, true);
+
+ /* enable PME to wakeup from hibernation */
+ writel(XLNX_PME_ENABLE_SIG_GEN, reg_base + XLNX_USB_PME_ENABLE);
+
+ /* change power state to D3 */
+ writel(XLNX_REQ_PWR_STATE_D3,
+ reg_base + XLNX_USB_REQ_PWR_STATE);
+
+ /* wait till current state is changed to D3 */
+ retries = DWC3_PWR_STATE_RETRIES;
+ do {
+ reg = readl(reg_base + XLNX_USB_CUR_PWR_STATE);
+ if ((reg & XLNX_CUR_PWR_STATE_BITMASK) ==
+ XLNX_CUR_PWR_STATE_D3)
+ break;
+
+ udelay(DWC3_PWR_TIMEOUT);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "Failed to set power state to D3\n");
+ return -EIO;
+ }
+
+ /* Assert USB core reset after entering D3 state */
+ xpsgtr_usb_crst_assert(simple->phy);
+
+ dwc->is_d3 = true;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dwc3_set_usb_core_power);
+#endif
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
@@ -153,6 +493,13 @@ static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+ if (!simple->wakeup_capable && !simple->dwc->is_d3) {
+ /* Ask ULPI to turn OFF Vbus */
+ dwc3_simple_vbus(simple->dwc, true);
+
+ clk_bulk_disable(simple->num_clocks, simple->clks);
+ }
+
if (simple->need_reset)
reset_control_assert(simple->resets);
@@ -162,11 +509,18 @@ static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+ int ret;
+
+ if (simple->wakeup_capable || simple->dwc->is_d3)
+ return 0;
+
+ ret = clk_bulk_enable(simple->num_clocks, simple->clks);
+ dwc3_simple_vbus(simple->dwc, false);
if (simple->need_reset)
reset_control_deassert(simple->resets);
- return 0;
+ return ret;
}
static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
@@ -178,6 +532,7 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "rockchip,rk3399-dwc3" },
{ .compatible = "xlnx,zynqmp-dwc3" },
+ { .compatible = "xlnx,versal-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "amlogic,meson-axg-dwc3" },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index e8be2049a416..47d09b1e4a57 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -336,6 +336,11 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
}
+		/* Send the status indicating whether remote wakeup is
+		 * supported by the device.
+		 */
+ usb_status |= dwc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
break;
case USB_RECIP_INTERFACE:
@@ -450,7 +455,12 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
+		dwc->remote_wakeup = !!set;
break;
+
/*
	 * 9.4.1 says only for SS, in AddressState only for
* default control pipe
@@ -467,6 +477,34 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
case USB_DEVICE_TEST_MODE:
ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (set) {
+ if (dwc->gadget.host_request_flag) {
+ struct usb_phy *phy =
+ usb_get_phy(USB_PHY_TYPE_USB3);
+
+ dwc->gadget.b_hnp_enable = 0;
+ dwc->gadget.host_request_flag = 0;
+ otg_start_hnp(phy->otg);
+ usb_put_phy(phy);
+ } else {
+ dwc->gadget.b_hnp_enable = 1;
+ }
+		} else {
+			return -EINVAL;
+		}
+ break;
+
+ case USB_DEVICE_A_HNP_SUPPORT:
+ /* RH port supports HNP */
+ dev_dbg(dwc->dev,
+ "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
+ break;
+
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ /* other RH port does */
+ dev_dbg(dwc->dev,
+ "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
+ break;
default:
ret = -EINVAL;
}
@@ -745,7 +783,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- ret = dwc3_ep0_handle_status(dwc, ctrl);
+ if (le16_to_cpu(ctrl->wIndex) == OTG_STS_SELECTOR)
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ else
+ ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7a28048faa3e..f9886fb9360a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -207,6 +207,9 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
{
struct dwc3 *dwc = dep->dwc;
+ if (dep->stream_capable && timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+
dwc3_gadget_del_and_unmap_request(dep, req, status);
req->status = DWC3_REQUEST_STATUS_COMPLETED;
@@ -421,8 +424,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
-static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
- struct dwc3_trb *trb)
+dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb)
{
u32 offset = (char *) trb - (char *) dep->trb_pool;
@@ -537,6 +539,19 @@ static int dwc3_gadget_start_config(struct dwc3_ep *dep)
return 0;
}
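+
+/*
+ * Stream deadlock timer handler: if the host never issues the expected
+ * prime transaction, stop the stuck transfer and kick it again.
+ */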
+static void stream_timeout_function(struct timer_list *arg)
+{
+ struct dwc3_request *req = from_timer(req, arg, stream_timeout_timer);
+ struct dwc3_ep *dep = req->dep;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_stop_active_transfer(dep, true, false);
+ __dwc3_gadget_kick_transfer(dep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -570,7 +585,8 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
- | DWC3_DEPCFG_STREAM_EVENT_EN;
+ | DWC3_DEPCFG_STREAM_EVENT_EN
+ | DWC3_DEPCFG_XFER_COMPLETE_EN;
dep->stream_capable = true;
}
@@ -608,7 +624,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
* Caller should take care of locking. Execute all necessary commands to
* initialize a HW endpoint so it can be used by a gadget driver.
*/
-static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
+int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
@@ -616,7 +632,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
u32 reg;
int ret;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) || dwc->is_hibernated) {
ret = dwc3_gadget_start_config(dep);
if (ret)
return ret;
@@ -626,7 +642,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
if (ret)
return ret;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) || dwc->is_hibernated) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
@@ -640,11 +656,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
if (usb_endpoint_xfer_control(desc))
goto out;
- /* Initialize the TRB ring */
- dep->trb_dequeue = 0;
- dep->trb_enqueue = 0;
- memset(dep->trb_pool, 0,
- sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+ if (!dwc->is_hibernated) {
+ /* Initialize the TRB ring */
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+ memset(dep->trb_pool, 0,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+ }
/* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
@@ -660,8 +678,8 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
* Issue StartTransfer here with no-op TRB so we can always rely on No
* Response Update Transfer command.
*/
- if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
- usb_endpoint_xfer_int(desc)) {
+ if (((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
+ usb_endpoint_xfer_int(desc)) && !dwc->is_hibernated) {
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
@@ -687,8 +705,6 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
- bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
@@ -725,7 +741,7 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
*
* Caller should take care of locking.
*/
-static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -1002,6 +1018,16 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
+	/*
+	 * To start a transfer on a different stream number, the endpoint
+	 * must first release the previously acquired transfer resource.
+	 * There are two ways to do that: 1. end the transfer, or 2. set
+	 * the LST bit in the last control TRB.
+	 *
+	 * Using the LST bit avoids the cost of ending the transfer and
+	 * thus improves performance.
+	 */
+ else if (dep->stream_capable)
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
@@ -1219,7 +1245,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
+int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
@@ -1249,8 +1275,12 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
if (dep->stream_capable)
cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
+
} else {
cmd = DWC3_DEPCMD_UPDATETRANSFER |
DWC3_DEPCMD_PARAM(dep->resource_index);
@@ -1275,6 +1305,13 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
return ret;
}
+ if (starting && dep->stream_capable) {
+		mod_timer(&req->stream_timeout_timer,
+			  jiffies + msecs_to_jiffies(STREAM_TIMEOUT_MS));
+ }
+
return 0;
}
@@ -1428,11 +1465,13 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
ret = __dwc3_gadget_kick_transfer(dep);
if (ret != -EAGAIN)
break;
+ dep->flags &= ~DWC3_EP_PENDING_REQUEST;
}
return ret;
}
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc);
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
@@ -1457,11 +1496,22 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
req->request.actual = 0;
req->request.status = -EINPROGRESS;
+ if (dep->stream_capable)
+ timer_setup(&req->stream_timeout_timer,
+ stream_timeout_function, 0);
+
trace_dwc3_ep_queue(req);
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ /* If core is hibernated, need to wakeup (remote wakeup) */
+ if (dwc->is_hibernated) {
+ dwc->force_hiber_wake = true;
+ gadget_wakeup_interrupt(dwc);
+ dwc->force_hiber_wake = false;
+ }
+
/* Start the transfer only after the END_TRANSFER is completed */
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
dep->flags |= DWC3_EP_DELAY_START;
@@ -1478,13 +1528,22 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
- !(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ !(dep->flags & DWC3_EP_TRANSFER_STARTED))
return 0;
- if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
- return __dwc3_gadget_start_isoc(dep);
+ if (dep->flags & DWC3_EP_PENDING_REQUEST) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+				/*
+				 * If there are no entries in the request
+				 * list, the PENDING flag is set so that END
+				 * TRANSFER is issued when an entry is added
+				 * to the request list.
+				 */
+ dwc3_stop_active_transfer(dep, true, true);
+ dep->flags = DWC3_EP_ENABLED;
}
+ /* Rest is taken care by DWC3_DEPEVT_XFERNOTREADY */
+ return 0;
}
}
@@ -1565,6 +1624,9 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->stream_capable && timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+
list_for_each_entry(r, &dep->pending_list, list) {
if (r == req)
break;
@@ -1843,7 +1905,7 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
+int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
u32 timeout = 500;
@@ -1918,7 +1980,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return ret;
}
-static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
u32 reg;
@@ -1932,13 +1994,17 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ /* Enable hibernation IRQ */
+ if (dwc->has_hibernation)
+ reg |= DWC3_DEVTEN_HIBERNATIONREQEVTEN;
+
if (dwc->revision < DWC3_REVISION_250A)
reg |= DWC3_DEVTEN_ULSTCNGEN;
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
-static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
/* mask all interrupts */
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
@@ -2022,6 +2088,16 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_setup_nump(dwc);
+	/* For OTG mode, check if the core is currently in host mode.
+	 * This is not an error: the core may be working as host while
+	 * the kernel is told to bind a gadget class driver module.
+	 * The remaining operations below are handled by the OTG driver
+	 * whenever required.
+	 */
+ if (dwc3_readl(dwc->regs, DWC3_GSTS) & DWC3_GSTS_CUR_MODE)
+ return 0;
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2055,6 +2131,7 @@ err0:
return ret;
}
+static irqreturn_t wakeup_interrupt(int irq, void *_dwc);
static int dwc3_gadget_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
@@ -2072,6 +2149,18 @@ static int dwc3_gadget_start(struct usb_gadget *g,
goto err0;
}
+ /* look for wakeup interrupt if hibernation is supported */
+ if (dwc->has_hibernation) {
+ irq = dwc->irq_wakeup;
+		ret = request_irq(irq, wakeup_interrupt,
+ IRQF_SHARED, "usb-wakeup", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request wakeup irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver) {
dev_err(dwc->dev, "%s is already bound to %s\n",
@@ -2092,7 +2181,10 @@ static int dwc3_gadget_start(struct usb_gadget *g,
err1:
spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
+ if (dwc->irq_gadget)
+ free_irq(dwc->irq_gadget, dwc->ev_buf);
+ if (dwc->irq_wakeup)
+ free_irq(dwc->irq_wakeup, dwc);
err0:
return ret;
@@ -2122,6 +2214,7 @@ out:
spin_unlock_irqrestore(&dwc->lock, flags);
free_irq(dwc->irq_gadget, dwc->ev_buf);
+	if (dwc->irq_wakeup)
+		free_irq(dwc->irq_wakeup, dwc);
return 0;
}
@@ -2459,7 +2552,11 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
- if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ return 1;
+
+ if ((event->status & DEPEVT_STATUS_LST) &&
(trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
@@ -2528,9 +2625,13 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req)) {
- __dwc3_gadget_kick_transfer(dep);
- goto out;
+	if (req->num_pending_sgs) {
+ if (!(event->status &
+ (DEPEVT_STATUS_SHORT | DEPEVT_STATUS_LST))) {
+ __dwc3_gadget_kick_transfer(dep);
+ goto out;
+ }
}
dwc3_gadget_giveback(dep, req, status);
@@ -2573,10 +2674,26 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_BUSERR)
status = -ECONNRESET;
- if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
+ if ((event->status & DEPEVT_STATUS_MISSED_ISOC) &&
+ usb_endpoint_xfer_isoc(dep->endpoint.desc))
status = -EXDEV;
- if (list_empty(&dep->started_list))
+ dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
+
+ if (dep->stream_capable && !list_empty(&dep->started_list))
+ __dwc3_gadget_kick_transfer(dep);
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ list_empty(&dep->started_list)) {
+ if (list_empty(&dep->pending_list))
+			/*
+			 * If there is no entry in the request list, do
+			 * not issue END TRANSFER now. Just set the
+			 * PENDING flag, so that END TRANSFER is issued
+			 * when an entry is added to the request list.
+			 */
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ else
stop = true;
}
@@ -2618,6 +2735,28 @@ static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
(void) __dwc3_gadget_start_isoc(dep);
}
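+
+/*
+ * A stream event means the host primed this stream; cancel the deadlock
+ * timer of the started request matching the reported stream ID.
+ */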
+static void dwc3_endpoint_stream_event(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep;
+ struct dwc3_request *req;
+ u8 epnum = event->endpoint_number;
+ u8 stream_id;
+
+ dep = dwc->eps[epnum];
+
+ stream_id = event->parameters;
+
+ /* Check for request matching the streamid and delete the timer */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->request.stream_id == stream_id) {
+ if (timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+ break;
+ }
+ }
+}
+
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
@@ -2642,12 +2781,21 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
}
switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ if (!dep->stream_capable)
+ break;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ /* Fall Through */
case DWC3_DEPEVT_XFERINPROGRESS:
dwc3_gadget_endpoint_transfer_in_progress(dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
dwc3_gadget_endpoint_transfer_not_ready(dep, event);
break;
+ case DWC3_DEPEVT_STREAMEVT:
+ if (event->status == DEPEVT_STREAMEVT_FOUND)
+ dwc3_endpoint_stream_event(dwc, event);
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
@@ -2662,8 +2810,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
- case DWC3_DEPEVT_STREAMEVT:
- case DWC3_DEPEVT_XFERCOMPLETE:
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
}
@@ -2708,7 +2854,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{
struct dwc3 *dwc = dep->dwc;
@@ -2760,6 +2906,13 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
+	/*
+	 * When the transfer is stopped with the ForceRM bit false, it
+	 * can be restarted by passing resource_index in params, so
+	 * don't lose it
+	 */
+ if (force)
+ dep->resource_index = 0;
+
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
@@ -2804,6 +2957,15 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
+	/* In USB 2.0, clear the DWC3_DCTL_KEEP_CONNECT bit to avoid a
+	 * hibernation interrupt at the time of connection.
+	 */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
@@ -2977,6 +3139,16 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
}
/*
+	 * In USB 2.0, set the DWC3_DCTL_KEEP_CONNECT bit here to avoid a
+	 * hibernation interrupt at the time of connection
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /*
* Configure PHY via GUSB3PIPECTLn if required.
*
* Update GTXFIFOSIZn
@@ -2999,6 +3171,17 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
}
}
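+
+/* Handler for the hibernation wakeup IRQ requested in dwc3_gadget_start() */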
+static irqreturn_t wakeup_interrupt(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = (struct dwc3 *)_dwc;
+
+ spin_lock(&dwc->lock);
+ gadget_wakeup_interrupt(dwc);
+ spin_unlock(&dwc->lock);
+
+ return IRQ_HANDLED;
+}
+
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
@@ -3126,10 +3309,12 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
* STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
* Device Fallback from SuperSpeed
*/
- if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+ if ((!!is_ss ^ (dwc->speed >= DWC3_DSTS_SUPERSPEED)) &&
+ (!(dwc->has_hibernation)))
return;
/* enter hibernation here */
+ gadget_hibernation_interrupt(dwc);
}
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
@@ -3223,12 +3408,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
*/
evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
+
+ if (dwc->is_hibernated)
+ break;
}
evt->count = 0;
evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
+ if (dwc->is_hibernated)
+ return ret;
+
/* Unmask interrupt */
reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg &= ~DWC3_GEVNTSIZ_INTMASK;
@@ -3270,6 +3461,9 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
return IRQ_HANDLED;
}
+ if (dwc->is_hibernated)
+ return IRQ_HANDLED;
+
/*
* With PCIe legacy interrupt, test shows that top-half irq handler can
* be called again after HW interrupt deassertion. Check if bottom-half
@@ -3313,7 +3507,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
- int irq;
+ int irq, irq_hiber;
irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
if (irq > 0)
@@ -3331,15 +3525,23 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
irq = platform_get_irq(dwc3_pdev, 0);
if (irq > 0)
- goto out;
-
- if (irq != -EPROBE_DEFER)
- dev_err(dwc->dev, "missing peripheral IRQ\n");
-
- if (!irq)
- irq = -EINVAL;
+ dwc->irq_gadget = irq;
+ if (irq == -EPROBE_DEFER)
+ goto out;
out:
+ /* look for wakeup interrupt if hibernation is supported */
+ if (dwc->has_hibernation) {
+ irq_hiber = platform_get_irq_byname(dwc3_pdev, "hiber");
+ if (irq_hiber > 0) {
+ dwc->irq_wakeup = irq_hiber;
+ } else {
+ irq_hiber = platform_get_irq(dwc3_pdev, 2);
+ if (irq_hiber > 0)
+ dwc->irq_wakeup = irq_hiber;
+ }
+ }
+
return irq;
}
@@ -3433,6 +3635,28 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
+ if (dwc->dr_mode == USB_DR_MODE_OTG) {
+ struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (!IS_ERR(phy)) {
+ if (phy && phy->otg) {
+ ret = otg_set_peripheral(phy->otg,
+ &dwc->gadget);
+ if (ret) {
+ dev_err(dwc->dev,
+ "otg_set_peripheral failed\n");
+ usb_put_phy(phy);
+ phy = NULL;
+ goto err4;
+ }
+ } else {
+ usb_put_phy(phy);
+ phy = NULL;
+ }
+ }
+ }
+
return 0;
err4:
@@ -3471,6 +3695,16 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return 0;
+ if (dwc->is_hibernated) {
+ /*
+ * As we are about to suspend, wake the controller from
+ * D3 & hibernation states
+ */
+ dwc->force_hiber_wake = true;
+ gadget_wakeup_interrupt(dwc);
+ dwc->force_hiber_wake = false;
+ }
+
dwc3_gadget_run_stop(dwc, false, false);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
@@ -3481,6 +3715,7 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
+ u32 reg;
if (!dwc->gadget_driver)
return 0;
@@ -3493,6 +3728,15 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
if (ret < 0)
goto err1;
+	/* In USB 2.0, set the DWC3_DCTL_KEEP_CONNECT bit to avoid a
+	 * hibernation interrupt at the time of connection.
+	 */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
return 0;
err1:
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 3ed738e86ea7..47a275b0184f 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -48,6 +48,14 @@ struct dwc3;
/* DEPXFERCFG parameter 0 */
#define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff)
+/* Retry counts and poll delays (in microseconds) used during hibernation */
+#define DWC3_NON_STICKY_RESTORE_RETRIES 500
+#define DWC3_NON_STICKY_SAVE_RETRIES 500
+#define DWC3_DEVICE_CTRL_READY_RETRIES 20000
+#define DWC3_NON_STICKY_RESTORE_DELAY 100
+#define DWC3_NON_STICKY_SAVE_DELAY 100
+#define DWC3_DEVICE_CTRL_READY_DELAY 5
+
/* -------------------------------------------------------------------------- */
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
@@ -100,11 +108,21 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_gadget_enable_irq(struct dwc3 *dwc);
+void dwc3_gadget_disable_irq(struct dwc3 *dwc);
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action);
+int __dwc3_gadget_ep_disable(struct dwc3_ep *dep);
+int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep);
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
+int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend);
+dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb);
+void gadget_hibernation_interrupt(struct dwc3 *dwc);
+void gadget_wakeup_interrupt(struct dwc3 *dwc);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/gadget_hibernation.c b/drivers/usb/dwc3/gadget_hibernation.c
new file mode 100644
index 000000000000..3f6a98150764
--- /dev/null
+++ b/drivers/usb/dwc3/gadget_hibernation.c
@@ -0,0 +1,567 @@
+/**
+ * gadget_hibernation.c - DesignWare USB3 DRD Controller gadget hibernation file
+ *
+ * This file has routines to handle hibernation and wakeup events in gadget mode
+ *
+ * Author: Mayank Adesara <madesara@xilinx.com>
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "core.h"
+#include "gadget.h"
+#include "debug.h"
+#include "io.h"
+
+/* array of registers to save on hibernation and restore them on wakeup */
+static u32 save_reg_addr[] = {
+ DWC3_DCTL,
+ DWC3_DCFG,
+ DWC3_DEVTEN
+};
+
+/*
+ * wait_timeout - Waits until timeout
+ * @wait_time: time to wait in jiffies
+ */
+static void wait_timeout(unsigned long wait_time)
+{
+ unsigned long timeout = jiffies + wait_time;
+
+ while (!time_after_eq(jiffies, timeout))
+ cpu_relax();
+}
+
+/**
+ * save_regs - Saves registers on hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int save_regs(struct dwc3 *dwc)
+{
+ int i;
+
+ if (!dwc->saved_regs) {
+ dwc->saved_regs = devm_kmalloc(dwc->dev,
+ sizeof(save_reg_addr),
+ GFP_KERNEL);
+ if (!dwc->saved_regs) {
+ dev_err(dwc->dev, "Not enough memory to save regs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_addr); i++)
+ dwc->saved_regs[i] = dwc3_readl(dwc->regs,
+ save_reg_addr[i]);
+ return 0;
+}
+
+/**
+ * restore_regs - Restores registers on wakeup
+ * @dwc: pointer to our controller context structure
+ */
+static void restore_regs(struct dwc3 *dwc)
+{
+ int i;
+
+ if (!dwc->saved_regs) {
+ dev_warn(dwc->dev, "Regs not saved\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_addr); i++)
+ dwc3_writel(dwc->regs, save_reg_addr[i],
+ dwc->saved_regs[i]);
+}
+
+/**
+ * restart_ep0_trans - Restarts EP0 transfer on wakeup
+ * @dwc: pointer to our controller context structure
+ * @epnum: endpoint number
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restart_ep0_trans(struct dwc3 *dwc, int epnum)
+{
+ struct dwc3_ep *dep = dwc->eps[epnum];
+ struct dwc3_trb *trb = dwc->ep0_trb;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+ params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+ /* set HWO bit back to 1 and restart transfer */
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+	/* Clear the TRBSTS field */
+ trb->size &= ~(0x0F << 28);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_PARAM(0);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0) {
+ dev_err(dwc->dev, "failed to restart transfer on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dwc3_gadget_ep_get_transfer_index(dep);
+
+ return 0;
+}
+
+/**
+ * restore_eps - Restores non EP0 eps in the same state as they were before
+ * hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restore_eps(struct dwc3 *dwc)
+{
+ int epnum, ret;
+
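+	/*
+	 * Two passes: first re-enable every previously enabled endpoint,
+	 * then restore each endpoint's STALL state or restart its
+	 * in-progress transfer.
+	 */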
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ /* Enable the endpoint */
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ret = __dwc3_gadget_ep_enable(dep, true);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return ret;
+ }
+ }
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (dep->flags & DWC3_EP_STALL) {
+ /* Set stall for the endpoint */
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+ } else {
+ u32 cmd;
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ u8 trb_dequeue = dep->trb_dequeue;
+
+ trb = &dep->trb_pool[trb_dequeue];
+
+ /*
+ * check the last processed TRBSTS field has value
+ * 4 (TRBInProgress), if yes resubmit the same TRB
+ */
+ if (DWC3_TRB_SIZE_TRBSTS(trb->size) ==
+ DWC3_TRB_STS_XFER_IN_PROG) {
+ /* Set the HWO bit */
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+ /* Clear the TRBSTS field */
+ trb->size &= ~(0x0F << 28);
+
+ memset(&params, 0, sizeof(params));
+
+ /* Issue starttransfer */
+ params.param0 =
+ upper_32_bits(dwc3_trb_dma_offset(dep,
+ trb));
+ params.param1 =
+ lower_32_bits(dwc3_trb_dma_offset(dep,
+ trb));
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER |
+ DWC3_DEPCMD_PARAM(0);
+
+ dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+
+ dwc3_gadget_ep_get_transfer_index(dep);
+ } else {
+ ret = __dwc3_gadget_kick_transfer(dep);
+ if (ret) {
+ dev_err(dwc->dev,
+ "%s: restart transfer failed\n",
+ dep->name);
+ return ret;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * restore_ep0 - Restores EP0 in the same state as they were before hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restore_ep0(struct dwc3 *dwc)
+{
+ int epnum, ret;
+
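+	/* Endpoints 0 and 1 are the OUT and IN halves of control EP0 */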
+ for (epnum = 0; epnum < 2; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ret = __dwc3_gadget_ep_enable(dep, true);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return ret;
+ }
+
+ if (dep->flags & DWC3_EP_STALL) {
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+ } else {
+ if (!dep->resource_index && epnum)
+ continue;
+
+ ret = restart_ep0_trans(dwc, epnum);
+ if (ret) {
+ dev_err(dwc->dev,
+ "failed to restart transfer on: %s\n",
+ dep->name);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * save_endpoint_state - Saves ep state on hibernation
+ * @dep: endpoint to get state
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int save_endpoint_state(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_GETEPSTATE,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "Failed to get endpoint state on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dep->saved_state = dwc3_readl(dep->regs, DWC3_DEPCMDPAR2);
+ return 0;
+}
+
+/**
+ * gadget_hibernation_interrupt - Interrupt handler of hibernation
+ * @dwc: pointer to our controller context structure
+ */
+void gadget_hibernation_interrupt(struct dwc3 *dwc)
+{
+ u32 epnum, reg;
+ int retries, ret;
+
+ /* Check if the link state is valid before hibernating */
+ switch (dwc3_gadget_get_link_state(dwc)) {
+ case DWC3_LINK_STATE_U3:
+ case DWC3_LINK_STATE_SS_DIS:
+ break;
+ default:
+ dev_dbg(dwc->dev,
+ "%s: Got fake hiber event\n", __func__);
+ return;
+ }
+
+ /* stop all active transfers and save endpoint status */
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED)
+ dwc3_stop_active_transfer(dep, false, true);
+
+ save_endpoint_state(dep);
+ }
+
+ /* stop the controller */
+ dwc3_gadget_run_stop(dwc, false, true);
+ dwc->is_hibernated = true;
+
+ /*
+ * ack events, don't process them; h/w decrements the count by the value
+ * written
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
+ dwc->ev_buf->count = 0;
+ dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+ /* disable keep connect if we are disconnected right now */
+ if (dwc3_gadget_get_link_state(dwc) == DWC3_LINK_STATE_SS_DIS) {
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ } else {
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /* save generic registers */
+ save_regs(dwc);
+
+ /* initiate controller save state */
+ reg |= DWC3_DCTL_CSS;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* wait till controller saves state */
+ retries = DWC3_NON_STICKY_SAVE_RETRIES;
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_SSS))
+ break;
+
+ udelay(DWC3_NON_STICKY_SAVE_DELAY);
+ } while (--retries);
+
+	if (!retries) {
+ dev_err(dwc->dev, "USB core failed to save state\n");
+ goto err;
+ }
+
+ /* Set the controller as wakeup capable */
+ dwc3_simple_wakeup_capable(dwc->dev, true);
+
+ /* set USB core power state to D3 - power down */
+ ret = dwc3_set_usb_core_power(dwc, false);
+ if (ret < 0) {
+ dev_err(dwc->dev, "%s: Failed to hibernate\n", __func__);
+ /* call wakeup handler */
+ gadget_wakeup_interrupt(dwc);
+ return;
+ }
+
+ dev_info(dwc->dev, "Hibernated!\n");
+ return;
+
+err:
+ dev_err(dwc->dev, "Fail in handling Hibernation Interrupt\n");
+}
+
+/**
+ * gadget_wakeup_interrupt - Interrupt handler of wakeup
+ * @dwc: pointer to our controller context structure
+ */
+void gadget_wakeup_interrupt(struct dwc3 *dwc)
+{
+ u32 reg, link_state;
+ int ret, retries;
+ bool enter_hiber = false;
+
+ /* On USB 2.0 we observed back to back wakeup interrupts */
+ if (!dwc->is_hibernated) {
+ dev_err(dwc->dev, "Not in hibernated state\n");
+ goto err;
+ }
+
+ /* Restore power to USB core */
+ if (dwc3_set_usb_core_power(dwc, true)) {
+ dev_err(dwc->dev, "Failed to restore USB core power\n");
+ goto err;
+ }
+
+ /* Clear the controller wakeup capable flag */
+ dwc3_simple_wakeup_capable(dwc->dev, false);
+
+ /* Initialize the core and restore the saved registers */
+ dwc3_core_init(dwc);
+ restore_regs(dwc);
+
+ /* ask controller to save the non-sticky registers */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CRS;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* Wait till non-sticky registers are restored */
+ retries = DWC3_NON_STICKY_RESTORE_RETRIES;
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_RSS))
+ break;
+
+ udelay(DWC3_NON_STICKY_RESTORE_DELAY);
+ } while (--retries);
+
+	if (!retries || (reg & DWC3_DSTS_SRE)) {
+ dev_err(dwc->dev, "Failed to restore non-sticky regs\n");
+ goto err;
+ }
+
+ /* restore ep0 endpoints */
+ ret = restore_ep0(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Failed in restorig EP0 states\n");
+ goto err;
+ }
+
+ /* start the controller */
+ ret = dwc3_gadget_run_stop(dwc, true, false);
+ if (ret < 0) {
+ dev_err(dwc->dev, "USB core failed to start on wakeup\n");
+ goto err;
+ }
+
+ /* Wait until device controller is ready */
+ retries = DWC3_DEVICE_CTRL_READY_RETRIES;
+ while (--retries) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (reg & DWC3_DSTS_DCNRD)
+ udelay(DWC3_DEVICE_CTRL_READY_DELAY);
+ else
+ break;
+ }
+
+	if (!retries) {
+ dev_err(dwc->dev, "USB core failed to restore controller\n");
+ goto err;
+ }
+
+ /*
+	 * As some spurious signals may also cause a wakeup event, wait for
+	 * some time and check the link state to confirm the wakeup is real
+ */
+ wait_timeout(msecs_to_jiffies(10));
+
+ link_state = dwc3_gadget_get_link_state(dwc);
+
+ /* check if the link state is in a valid state */
+ switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
+ /* Reset devaddr */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ /* issue recovery on the link */
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev,
+ "Failed to set link state to Recovery\n");
+ goto err;
+ }
+
+ break;
+
+ case DWC3_LINK_STATE_SS_DIS:
+ /* Clear keep connect from reconnecting to HOST */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ /* fall through */
+ case DWC3_LINK_STATE_U3:
+ /* Ignore wakeup event as the link is still in U3 state */
+ dev_dbg(dwc->dev, "False wakeup event %d\n", link_state);
+
+ if (!dwc->force_hiber_wake)
+ enter_hiber = true;
+ break;
+
+ default:
+ /* issue recovery on the link */
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev,
+ "Failed to set link state to Recovery\n");
+ goto err;
+ }
+
+ break;
+ }
+
+ if (link_state != DWC3_LINK_STATE_SS_DIS) {
+ /* Restore non EP0 EPs */
+ ret = restore_eps(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Failed restoring non-EP0 states\n");
+ goto err;
+ }
+ }
+
+ /* clear the flag */
+ dwc->is_hibernated = false;
+
+ if (enter_hiber) {
+ /*
+ * as the wakeup was because of the spurious signals,
+ * enter hibernation again
+ */
+ gadget_hibernation_interrupt(dwc);
+ return;
+ }
+
+ dev_info(dwc->dev, "We are back from hibernation!\n");
+ return;
+
+err:
+	dev_err(dwc->dev, "Failed to handle wakeup interrupt\n");
+}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 4252fad1d184..56e02a561a0b 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -8,9 +8,17 @@
*/
#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/usb/xhci_pdriver.h>
#include "core.h"
+void dwc3_host_wakeup_capable(struct device *dev, bool wakeup)
+{
+ dwc3_simple_wakeup_capable(dev, wakeup);
+}
+EXPORT_SYMBOL(dwc3_host_wakeup_capable);
+
static int dwc3_host_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
@@ -96,6 +104,10 @@ int dwc3_host_init(struct dwc3 *dwc)
if (dwc->usb2_lpm_disable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
+ if (device_property_read_bool(&dwc3_pdev->dev,
+ "snps,xhci-stream-quirk"))
+		props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-stream-quirk");
+
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
* where Port Disable command doesn't work.
@@ -121,6 +133,17 @@ int dwc3_host_init(struct dwc3 *dwc)
phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
dev_name(dwc->dev));
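+	/*
+	 * 0xdeadbeef below is a sentinel, not a real usb_bus pointer:
+	 * dwc3_otg_set_host() in otg.c recognizes it and switches the
+	 * core out of peripheral mode instead of registering a host.
+	 */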
+ if (dwc->dr_mode == USB_DR_MODE_OTG) {
+ struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);
+
+ if (!IS_ERR(phy)) {
+ if (phy && phy->otg)
+ otg_set_host(phy->otg,
+ (struct usb_bus *)0xdeadbeef);
+ usb_put_phy(phy);
+ }
+ }
+
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
diff --git a/drivers/usb/dwc3/otg.c b/drivers/usb/dwc3/otg.c
new file mode 100644
index 000000000000..247f942e7078
--- /dev/null
+++ b/drivers/usb/dwc3/otg.c
@@ -0,0 +1,2199 @@
+/**
+ * otg.c - DesignWare USB3 DRD Controller OTG file
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Author: Manish Narani <mnarani@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/sysfs.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+
+#include <../drivers/usb/host/xhci.h>
+#include "platform_data.h"
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "otg.h"
+
+#include <linux/ulpi/regs.h>
+#include <linux/ulpi/driver.h>
+#include "debug.h"
+
+/* Print the hardware registers' value for debugging purpose */
+static void print_debug_regs(struct dwc3_otg *otg)
+{
+ u32 gctl = otg_read(otg, DWC3_GCTL);
+ u32 gsts = otg_read(otg, DWC3_GSTS);
+ u32 gdbgltssm = otg_read(otg, DWC3_GDBGLTSSM);
+ u32 gusb2phycfg0 = otg_read(otg, DWC3_GUSB2PHYCFG(0));
+ u32 gusb3pipectl0 = otg_read(otg, DWC3_GUSB3PIPECTL(0));
+ u32 dcfg = otg_read(otg, DWC3_DCFG);
+ u32 dctl = otg_read(otg, DWC3_DCTL);
+ u32 dsts = otg_read(otg, DWC3_DSTS);
+ u32 ocfg = otg_read(otg, OCFG);
+ u32 octl = otg_read(otg, OCTL);
+ u32 oevt = otg_read(otg, OEVT);
+ u32 oevten = otg_read(otg, OEVTEN);
+ u32 osts = otg_read(otg, OSTS);
+
+ otg_info(otg, "gctl = %08x\n", gctl);
+ otg_info(otg, "gsts = %08x\n", gsts);
+ otg_info(otg, "gdbgltssm = %08x\n", gdbgltssm);
+ otg_info(otg, "gusb2phycfg0 = %08x\n", gusb2phycfg0);
+ otg_info(otg, "gusb3pipectl0 = %08x\n", gusb3pipectl0);
+ otg_info(otg, "dcfg = %08x\n", dcfg);
+ otg_info(otg, "dctl = %08x\n", dctl);
+ otg_info(otg, "dsts = %08x\n", dsts);
+ otg_info(otg, "ocfg = %08x\n", ocfg);
+ otg_info(otg, "octl = %08x\n", octl);
+ otg_info(otg, "oevt = %08x\n", oevt);
+ otg_info(otg, "oevten = %08x\n", oevten);
+ otg_info(otg, "osts = %08x\n", osts);
+}
+
+/* Check whether the hardware supports HNP or not */
+static int hnp_capable(struct dwc3_otg *otg)
+{
+ if (otg->hwparams6 & GHWPARAMS6_HNP_SUPPORT_ENABLED)
+ return 1;
+ return 0;
+}
+
+/* Check whether the hardware supports SRP or not */
+static int srp_capable(struct dwc3_otg *otg)
+{
+ if (otg->hwparams6 & GHWPARAMS6_SRP_SUPPORT_ENABLED)
+ return 1;
+ return 0;
+}
+
+/* Wakeup main thread to execute the OTG flow after an event */
+static void wakeup_main_thread(struct dwc3_otg *otg)
+{
+ if (!otg->main_thread)
+ return;
+
+ otg_vdbg(otg, "\n");
+ /* Tell the main thread that something has happened */
+ otg->main_wakeup_needed = 1;
+ wake_up_interruptible(&otg->main_wq);
+}
+
+/* Sleep main thread for 'msecs' to wait for an event to occur */
+static int sleep_main_thread_timeout(struct dwc3_otg *otg, int msecs)
+{
+	signed long wait_jiffies;	/* don't shadow the global jiffies */
+ int rc = msecs;
+
+ if (signal_pending(current)) {
+ otg_dbg(otg, "Main thread signal pending\n");
+ rc = -EINTR;
+ goto done;
+ }
+ if (otg->main_wakeup_needed) {
+ otg_dbg(otg, "Main thread wakeup needed\n");
+ rc = msecs;
+ goto done;
+ }
+
+	wait_jiffies = msecs_to_jiffies(msecs);
+	rc = wait_event_freezable_timeout(otg->main_wq,
+					  otg->main_wakeup_needed,
+					  wait_jiffies);
+
+ if (rc > 0)
+ rc = jiffies_to_msecs(rc);
+
+done:
+ otg->main_wakeup_needed = 0;
+ return rc;
+}
+
+/* Sleep main thread to wait for an event to occur */
+static int sleep_main_thread(struct dwc3_otg *otg)
+{
+ int rc;
+
+ do {
+ rc = sleep_main_thread_timeout(otg, 5000);
+ } while (rc == 0);
+
+ return rc;
+}
+
+static void get_events(struct dwc3_otg *otg, u32 *otg_events, u32 *user_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&otg->lock, flags);
+
+ if (otg_events)
+ *otg_events = otg->otg_events;
+
+ if (user_events)
+ *user_events = otg->user_events;
+
+ spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+static void get_and_clear_events(struct dwc3_otg *otg, u32 *otg_events,
+ u32 *user_events)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&otg->lock, flags);
+
+ if (otg_events)
+ *otg_events = otg->otg_events;
+
+ if (user_events)
+ *user_events = otg->user_events;
+
+ otg->otg_events = 0;
+ otg->user_events = 0;
+
+ spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+static int check_event(struct dwc3_otg *otg, u32 otg_mask, u32 user_mask)
+{
+ u32 otg_events;
+ u32 user_events;
+
+ get_events(otg, &otg_events, &user_events);
+ if ((otg_events & otg_mask) || (user_events & user_mask)) {
+ otg_dbg(otg, "Event occurred: otg_events=%x, otg_mask=%x, \
+ user_events=%x, user_mask=%x\n", otg_events,
+ otg_mask, user_events, user_mask);
+ return 1;
+ }
+
+ return 0;
+}
+
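+/*
+ * Enable the requested OTG interrupts in OEVTEN, sleep until the IRQ
+ * handler posts a matching OTG/user event (or the timeout expires),
+ * then disable OEVTEN and hand back the accumulated events.
+ */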
+static int sleep_until_event(struct dwc3_otg *otg, u32 otg_mask, u32 user_mask,
+ u32 *otg_events, u32 *user_events, int timeout)
+{
+ int rc;
+
+ /* Enable the events */
+ if (otg_mask)
+ otg_write(otg, OEVTEN, otg_mask);
+
+ /* Wait until it occurs, or timeout, or interrupt. */
+ if (timeout) {
+ otg_vdbg(otg, "Waiting for event (timeout=%d)...\n", timeout);
+ rc = sleep_main_thread_until_condition_timeout(otg,
+ check_event(otg, otg_mask, user_mask), timeout);
+ } else {
+ otg_vdbg(otg, "Waiting for event (no timeout)...\n");
+ rc = sleep_main_thread_until_condition(otg,
+ check_event(otg, otg_mask, user_mask));
+ }
+
+ /* Disable the events */
+ otg_write(otg, OEVTEN, 0);
+
+ otg_vdbg(otg, "Woke up rc=%d\n", rc);
+ if (rc >= 0)
+ get_and_clear_events(otg, otg_events, user_events);
+
+ return rc;
+}
+
+static void set_capabilities(struct dwc3_otg *otg)
+{
+ u32 ocfg = 0;
+
+ otg_dbg(otg, "\n");
+ if (srp_capable(otg))
+ ocfg |= OCFG_SRP_CAP;
+
+ if (hnp_capable(otg))
+ ocfg |= OCFG_HNP_CAP;
+
+ otg_write(otg, OCFG, ocfg);
+
+ otg_dbg(otg, "Enabled SRP and HNP capabilities in OCFG\n");
+}
+
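+/*
+ * Poll @reg until (value & @mask) == @done or @msec milliseconds have
+ * elapsed. Returns 1 on success, 0 on timeout.
+ */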
+static int otg3_handshake(struct dwc3_otg *otg, u32 reg, u32 mask, u32 done,
+ u32 msec)
+{
+ u32 result;
+ u32 usec = msec * 1000;
+
+ otg_vdbg(otg, "reg=%08x, mask=%08x, value=%08x\n", reg, mask, done);
+ do {
+ result = otg_read(otg, reg);
+ if ((result & mask) == done)
+ return 1;
+ udelay(1);
+ usec -= 1;
+ } while (usec > 0);
+
+ return 0;
+}
+
+static int reset_port(struct dwc3_otg *otg)
+{
+ otg_dbg(otg, "\n");
+ if (!otg->otg.host)
+ return -ENODEV;
+ return usb_bus_start_enum(otg->otg.host, 1);
+}
+
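+/*
+ * Switch the OTG core between host (PERI_MODE_HOST) and peripheral
+ * (PERI_MODE_PERIPHERAL) roles and wait for OSTS to reflect the change.
+ * Returns the otg3_handshake() result: 1 on success, 0 on timeout.
+ */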
+static int set_peri_mode(struct dwc3_otg *otg, int mode)
+{
+ u32 octl;
+
+ /* Set peri_mode */
+ octl = otg_read(otg, OCTL);
+ if (mode)
+ octl |= OCTL_PERI_MODE;
+ else
+ octl &= ~OCTL_PERI_MODE;
+
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set OCTL PERI_MODE = %d in OCTL\n", mode);
+
+	if (mode)
+		return otg3_handshake(otg, OSTS, OSTS_PERIP_MODE,
+				      OSTS_PERIP_MODE, 100);
+
+	return otg3_handshake(otg, OSTS, OSTS_PERIP_MODE, 0, 100);
+}
+
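+/*
+ * Bring up the xHCI host: mask the OTG soft reset, clear RUN/STOP,
+ * switch the core to host role, register the primary/shared HCDs and,
+ * for an A-host, wait for xHCI to power the OTG port.
+ */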
+static int start_host(struct dwc3_otg *otg)
+{
+ int ret = -ENODEV;
+ int flg;
+ u32 octl;
+ u32 ocfg;
+ u32 dctl;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host)
+ return -ENODEV;
+
+ /*
+ * Prevent the host USBCMD.HCRST from resetting OTG core by setting
+ * OCFG.OTGSftRstMsk
+ */
+ ocfg = otg_read(otg, OCFG);
+ ocfg |= DWC3_OCFG_SFTRSTMASK;
+ otg_write(otg, OCFG, ocfg);
+
+ dctl = otg_read(otg, DCTL);
+ if (dctl & DWC3_DCTL_RUN_STOP) {
+ otg_dbg(otg, "Disabling the RUN/STOP bit\n");
+ dctl &= ~DWC3_DCTL_RUN_STOP;
+ otg_write(otg, DCTL, dctl);
+ }
+
+ if (!set_peri_mode(otg, PERI_MODE_HOST)) {
+ otg_err(otg, "Failed to start host\n");
+ return -EINVAL;
+ }
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ otg_dbg(otg, "hcd=%p xhci=%p\n", hcd, xhci);
+
+ if (otg->host_started) {
+ otg_info(otg, "Host already started\n");
+ goto skip;
+ }
+
+ /* Start host driver */
+
+ *(struct xhci_hcd **)hcd->hcd_priv = xhci;
+ ret = usb_add_hcd(hcd, otg->hcd_irq, IRQF_SHARED);
+ if (ret) {
+ otg_err(otg, "%s: failed to start primary hcd, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+	if (xhci->shared_hcd) {
+		*(struct xhci_hcd **)xhci->shared_hcd->hcd_priv = xhci;
+ ret = usb_add_hcd(xhci->shared_hcd, otg->hcd_irq, IRQF_SHARED);
+ if (ret) {
+ otg_err(otg,
+ "%s: failed to start secondary hcd, ret=%d\n",
+ __func__, ret);
+ usb_remove_hcd(hcd);
+ return ret;
+ }
+ }
+
+ otg->host_started = 1;
+skip:
+ hcd->self.otg_port = 1;
+ if (xhci->shared_hcd)
+ xhci->shared_hcd->self.otg_port = 1;
+
+ set_capabilities(otg);
+
+ /* Power the port only for A-host */
+ if (otg->otg.state == OTG_STATE_A_WAIT_VRISE) {
+ /* Spin on xhciPrtPwr bit until it becomes 1 */
+ flg = otg3_handshake(otg, OSTS,
+ OSTS_XHCI_PRT_PWR,
+ OSTS_XHCI_PRT_PWR,
+ 1000);
+ if (flg) {
+ otg_dbg(otg, "Port is powered by xhci-hcd\n");
+ /* Set port power control bit */
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_PRT_PWR_CTL;
+ otg_write(otg, OCTL, octl);
+ } else {
+ otg_dbg(otg, "Port is not powered by xhci-hcd\n");
+ }
+ }
+
+ return ret;
+}
+
+static int stop_host(struct dwc3_otg *otg)
+{
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "\n");
+
+ if (!otg->host_started) {
+ otg_info(otg, "Host already stopped\n");
+ return 1;
+ }
+
+ if (!otg->otg.host)
+ return -ENODEV;
+
+ otg_dbg(otg, "%s: turn off host %s\n",
+ __func__, otg->otg.host->bus_name);
+
+ if (work_pending(&otg->hp_work.work)) {
+ while (!cancel_delayed_work(&otg->hp_work))
+ msleep(20);
+ }
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+
+ if (xhci->shared_hcd)
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_remove_hcd(hcd);
+
+ otg->host_started = 0;
+ otg->dev_enum = 0;
+ return 0;
+}
+
+int dwc3_otg_host_release(struct usb_hcd *hcd)
+{
+ struct usb_bus *bus;
+ struct usb_device *rh;
+ struct usb_device *udev;
+
+ if (!hcd)
+ return -EINVAL;
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return 0;
+
+ rh = bus->root_hub;
+ udev = usb_hub_find_child(rh, bus->otg_port);
+ if (!udev)
+ return 0;
+
+ if (udev->config && udev->parent == udev->bus->root_hub) {
+ struct usb_otg20_descriptor *desc;
+
+ if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc) == 0) {
+ int err;
+
+ dev_info(&udev->dev, "found OTG descriptor\n");
+			if ((le16_to_cpu(desc->bcdOTG) >= 0x0200) &&
+			    (udev->speed == USB_SPEED_HIGH)) {
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_TEST_MODE,
+ 7 << 8,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
+ dev_info(&udev->dev,
+ "can't initiate HNP from host: %d\n",
+ err);
+ return -1;
+ }
+ }
+ } else {
+ dev_info(&udev->dev, "didn't find OTG descriptor\n");
+ }
+ } else {
+ dev_info(&udev->dev,
+ "udev->config NULL or udev->parent != udev->bus->root_hub\n");
+ }
+
+ return 0;
+}
+
+/* Sends the host release set feature request */
+static void host_release(struct dwc3_otg *otg)
+{
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "\n");
+ if (!otg->otg.host)
+ return;
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ dwc3_otg_host_release(hcd);
+ if (xhci->shared_hcd)
+ dwc3_otg_host_release(xhci->shared_hcd);
+}
+
+static void dwc3_otg_setup_event_buffers(struct dwc3_otg *otg)
+{
+	if (dwc3_readl(otg->dwc->regs, DWC3_GEVNTADRLO(0)) == 0x0) {
+		otg_dbg(otg, "setting up event buffers\n");
+		dwc3_event_buffers_setup(otg->dwc);
+	}
+}
+
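+/*
+ * Bring up the gadget side: mask the OTG soft reset, switch the core to
+ * peripheral role, re-enable ep0 in/out, restart SETUP handling and set
+ * RUN/STOP.
+ */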
+static void start_peripheral(struct dwc3_otg *otg)
+{
+ struct usb_gadget *gadget = otg->otg.gadget;
+ struct dwc3 *dwc = otg->dwc;
+ u32 ocfg;
+
+ otg_dbg(otg, "\n");
+ if (!gadget)
+ return;
+
+ /*
+ * Prevent the gadget DCTL.CSFTRST from resetting OTG core by setting
+ * OCFG.OTGSftRstMsk
+ */
+ ocfg = otg_read(otg, OCFG);
+ ocfg |= DWC3_OCFG_SFTRSTMASK;
+ otg_write(otg, OCFG, ocfg);
+
+ if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+ otg_err(otg, "Failed to set peripheral mode\n");
+
+ if (otg->peripheral_started) {
+ otg_info(otg, "Peripheral already started\n");
+ return;
+ }
+
+ set_capabilities(otg);
+
+ dwc3_otg_setup_event_buffers(otg);
+
+ if (dwc->gadget_driver) {
+ struct dwc3_ep *dep;
+ int ret;
+
+ spin_lock(&otg->lock);
+ dep = dwc->eps[0];
+
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ if (ret)
+ goto err0;
+
+ dep = dwc->eps[1];
+
+ ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+ if (ret)
+ goto err1;
+
+ otg_dbg(otg, "enabled ep in gadget driver\n");
+ /* begin to receive SETUP packets */
+ dwc->ep0state = EP0_SETUP_PHASE;
+ dwc3_ep0_out_start(dwc);
+
+ otg_dbg(otg, "enabled irq\n");
+ dwc3_gadget_enable_irq(dwc);
+
+ otg_write(otg, DCTL, otg_read(otg, DCTL) | DCTL_RUN_STOP);
+ otg_dbg(otg, "Setting DCTL_RUN_STOP to 1 in DCTL\n");
+ spin_unlock(&otg->lock);
+ }
+
+ gadget->b_hnp_enable = 0;
+ gadget->host_request_flag = 0;
+
+ otg->peripheral_started = 1;
+
+	/*
+	 * During HNP the bus must not stay idle for more than 155 ms, so
+	 * give the host enough time to load its stack before we start
+	 * triggering events.
+	 */
+ msleep(500);
+
+ return;
+err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+ return;
+}
+
+static void stop_peripheral(struct dwc3_otg *otg)
+{
+ struct usb_gadget *gadget = otg->otg.gadget;
+ struct dwc3 *dwc = otg->dwc;
+
+ otg_dbg(otg, "\n");
+
+ if (!otg->peripheral_started) {
+ otg_info(otg, "Peripheral already stopped\n");
+ return;
+ }
+
+ if (!gadget)
+ return;
+
+ otg_dbg(otg, "disabled ep in gadget driver\n");
+ spin_lock(&otg->lock);
+
+ dwc3_gadget_disable_irq(dwc);
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_ep_disable(dwc->eps[1]);
+
+ spin_unlock(&otg->lock);
+
+ otg->peripheral_started = 0;
+ msleep(20);
+}
+
+static void set_b_host(struct dwc3_otg *otg, int val)
+{
+ otg->otg.host->is_b_host = val;
+}
+
+static enum usb_otg_state do_b_idle(struct dwc3_otg *otg);
+
+static int init_b_device(struct dwc3_otg *otg)
+{
+ otg_dbg(otg, "\n");
+ set_capabilities(otg);
+
+ if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+ otg_err(otg, "Failed to start peripheral\n");
+
+ return do_b_idle(otg);
+}
+
+static int init_a_device(struct dwc3_otg *otg)
+{
+ otg_write(otg, OCFG, 0);
+ otg_write(otg, OCTL, 0);
+
+ otg_dbg(otg, "Write 0 to OCFG and OCTL\n");
+ return OTG_STATE_A_IDLE;
+}
+
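+/* Sample OSTS.ConIdSts to choose the initial A- or B-device role */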
+static enum usb_otg_state do_connector_id_status(struct dwc3_otg *otg)
+{
+ enum usb_otg_state state;
+ u32 osts;
+
+ otg_dbg(otg, "\n");
+
+ otg_write(otg, OCFG, 0);
+ otg_write(otg, OEVTEN, 0);
+ otg_write(otg, OEVT, 0xffffffff);
+ otg_write(otg, OEVTEN, OEVT_CONN_ID_STS_CHNG_EVNT);
+
+ msleep(60);
+
+ osts = otg_read(otg, OSTS);
+ if (!(osts & OSTS_CONN_ID_STS)) {
+ otg_dbg(otg, "Connector ID is A\n");
+ state = init_a_device(otg);
+ } else {
+ otg_dbg(otg, "Connector ID is B\n");
+ stop_host(otg);
+ state = init_b_device(otg);
+ }
+
+	/*
+	 * TODO: This is a workaround for the latest hibernation-enabled
+	 * bitfiles, which have problems before initializing SRP.
+	 */
+ msleep(50);
+
+ return state;
+}
+
+static void reset_hw(struct dwc3_otg *otg)
+{
+ u32 temp;
+
+ otg_dbg(otg, "\n");
+
+ otg_write(otg, OEVTEN, 0);
+ temp = otg_read(otg, OCTL);
+ temp &= OCTL_PERI_MODE;
+ otg_write(otg, OCTL, temp);
+ temp = otg_read(otg, GCTL);
+ temp |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+ otg_write(otg, GCTL, temp);
+}
+
+#define SRP_TIMEOUT 6000
+
+static void start_srp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_SES_REQ;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set OCTL_SES_REQ in OCTL\n");
+}
+
+static void start_b_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set (OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN) in OCTL\n");
+}
+
+static void stop_b_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl &= ~(OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN);
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "Clear ~(OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN) in OCTL\n");
+}
+
+static void start_a_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl |= OCTL_HST_SET_HNP_EN;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "set OCTL_HST_SET_HNP_EN in OCTL\n");
+}
+
+static void stop_a_hnp(struct dwc3_otg *otg)
+{
+ u32 octl;
+
+ octl = otg_read(otg, OCTL);
+ octl &= ~OCTL_HST_SET_HNP_EN;
+ otg_write(otg, OCTL, octl);
+ otg_dbg(otg, "clear OCTL_HST_SET_HNP_EN in OCTL\n");
+}
+
+static enum usb_otg_state do_a_hnp_init(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_HNP_CHNG_EVNT;
+
+ start_a_hnp(otg);
+ rc = 3000;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ stop_a_hnp(otg);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_HNP_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_HNP_CHNG_EVNT\n");
+ if (otg_events & OEVT_HST_NEG_SCS) {
+ otg_dbg(otg, "A-HNP Success\n");
+ return OTG_STATE_A_PERIPHERAL;
+
+ } else {
+ otg_dbg(otg, "A-HNP Failed\n");
+ return OTG_STATE_A_WAIT_VFALL;
+ }
+
+ } else if (rc == 0) {
+ otg_dbg(otg, "A-HNP Failed (Timed out)\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else {
+ goto again;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_a_host(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT;
+ user_mask = USER_SRP_EVENT |
+ USER_HNP_EVENT;
+
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (user_events & USER_HNP_EVENT) {
+ otg_dbg(otg, "USER_HNP_EVENT\n");
+ return OTG_STATE_A_SUSPEND;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+#define A_WAIT_VFALL_TIMEOUT 1000
+
+static enum usb_otg_state do_a_wait_vfall(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_A_DEV_IDLE_EVNT;
+
+ rc = A_WAIT_VFALL_TIMEOUT;
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_A_DEV_IDLE_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_IDLE_EVNT\n");
+ return OTG_STATE_A_IDLE;
+
+ } else if (rc == 0) {
+ otg_dbg(otg, "A_WAIT_VFALL_TIMEOUT\n");
+ return OTG_STATE_A_IDLE;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+
+}
+
+#define A_WAIT_BCON_TIMEOUT 1000
+
+static enum usb_otg_state do_a_wait_bconn(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT |
+ OEVT_A_DEV_HOST_EVNT;
+
+ rc = A_WAIT_BCON_TIMEOUT;
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (otg_events & OEVT_A_DEV_HOST_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_HOST_EVNT\n");
+ return OTG_STATE_A_HOST;
+
+ } else if (rc == 0) {
+ if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+ return OTG_STATE_A_HOST;
+ else
+ return OTG_STATE_A_WAIT_VFALL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+#define A_WAIT_VRISE_TIMEOUT 100
+
+static enum usb_otg_state do_a_wait_vrise(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 otg_events = 0;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "");
+ set_b_host(otg, 0);
+ start_host(otg);
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ usb_kick_hub_wq(hcd->self.root_hub);
+ if (xhci->shared_hcd)
+ usb_kick_hub_wq(xhci->shared_hcd->self.root_hub);
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT;
+
+ rc = A_WAIT_VRISE_TIMEOUT;
+
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &otg_events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (rc == 0) {
+ if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+ return OTG_STATE_A_WAIT_BCON;
+ else
+ return OTG_STATE_A_WAIT_VFALL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_a_idle(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT | OEVT_A_DEV_SRP_DET_EVNT;
+ user_mask = USER_SRP_EVENT;
+
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events,
+ 0);
+
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_A_DEV_SRP_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SRP_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ } else if (user_events & USER_SRP_EVENT) {
+ otg_dbg(otg, "User initiated VBUS\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_a_peripheral(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_A_DEV_SESS_END_DET_EVNT |
+ OEVT_A_DEV_B_DEV_HOST_END_EVNT;
+ user_mask = USER_HNP_END_SESSION;
+
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+
+ } else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+ return OTG_STATE_A_WAIT_VFALL;
+
+ } else if (otg_events & OEVT_A_DEV_B_DEV_HOST_END_EVNT) {
+ otg_dbg(otg, "OEVT_A_DEV_B_DEV_HOST_END_EVNT\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ } else if (user_events & USER_HNP_END_SESSION) {
+ otg_dbg(otg, "USER_HNP_END_SESSION\n");
+ return OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+#define HNP_TIMEOUT 4000
+
+static enum usb_otg_state do_b_hnp_init(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_HNP_CHNG_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT;
+
+ start_b_hnp(otg);
+ rc = HNP_TIMEOUT;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &events, NULL, rc);
+ stop_b_hnp(otg);
+
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ return OTG_STATE_B_IDLE;
+ } else if (events & OEVT_B_DEV_HNP_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_HNP_CHNG_EVNT\n");
+ if (events & OEVT_HST_NEG_SCS) {
+ otg_dbg(otg, "B-HNP Success\n");
+ return OTG_STATE_B_WAIT_ACON;
+
+ } else {
+ otg_err(otg, "B-HNP Failed\n");
+ return OTG_STATE_B_PERIPHERAL;
+ }
+ } else if (rc == 0) {
+ /* Timeout */
+ otg_err(otg, "HNP timed out!\n");
+ return OTG_STATE_B_PERIPHERAL;
+
+ } else {
+ goto again;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_peripheral(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT | OEVT_B_DEV_VBUS_CHNG_EVNT;
+ user_mask = USER_HNP_EVENT | USER_END_SESSION |
+ USER_SRP_EVENT | INITIAL_SRP;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ goto again;
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ return OTG_STATE_B_IDLE;
+ }
+
+ } else if (user_events & USER_HNP_EVENT) {
+ otg_dbg(otg, "USER_HNP_EVENT\n");
+ return do_b_hnp_init(otg);
+ } else if (user_events & USER_END_SESSION) {
+ otg_dbg(otg, "USER_END_SESSION\n");
+ return OTG_STATE_B_IDLE;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_wait_acon(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask = 0;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ otg_dbg(otg, "");
+ set_b_host(otg, 1);
+ start_host(otg);
+ otg_mask = OEVT_B_DEV_B_HOST_END_EVNT;
+ otg_write(otg, OEVTEN, otg_mask);
+ reset_port(otg);
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ usb_kick_hub_wq(hcd->self.root_hub);
+ if (xhci->shared_hcd)
+ usb_kick_hub_wq(xhci->shared_hcd->self.root_hub);
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_B_HOST_END_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT |
+ OEVT_HOST_ROLE_REQ_INIT_EVNT;
+ user_mask = USER_A_CONN_EVENT | USER_HNP_END_SESSION;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_B_DEV_B_HOST_END_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_B_HOST_END_EVNT\n");
+ return OTG_STATE_B_PERIPHERAL;
+ } else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ goto again;
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ return OTG_STATE_B_IDLE;
+ }
+ } else if (user_events & USER_A_CONN_EVENT) {
+ otg_dbg(otg, "A-device connected\n");
+ return OTG_STATE_B_HOST;
+ } else if (user_events & USER_HNP_END_SESSION) {
+ otg_dbg(otg, "USER_HNP_END_SESSION\n");
+ return OTG_STATE_B_PERIPHERAL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_host(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask = 0;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_B_HOST_END_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT |
+ OEVT_HOST_ROLE_REQ_INIT_EVNT;
+ user_mask = USER_HNP_END_SESSION;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ /* Higher priority first */
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (otg_events & OEVT_B_DEV_B_HOST_END_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_B_HOST_END_EVNT\n");
+ return OTG_STATE_B_PERIPHERAL;
+ } else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ goto again;
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ return OTG_STATE_B_IDLE;
+ }
+ } else if (user_events & USER_HNP_END_SESSION) {
+ otg_dbg(otg, "USER_HNP_END_SESSION\n");
+ return OTG_STATE_B_PERIPHERAL;
+ }
+
+ /* Invalid state */
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_idle(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 user_mask;
+ u32 otg_events = 0;
+ u32 user_events = 0;
+
+ otg_dbg(otg, "");
+
+ if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+ otg_err(otg, "Failed to set peripheral mode\n");
+
+ dwc3_otg_setup_event_buffers(otg);
+
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_SES_VLD_DET_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT;
+ user_mask = USER_SRP_EVENT;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, user_mask,
+ &otg_events, &user_events, 0);
+
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if ((otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) ||
+ (otg_events & OEVT_B_DEV_SES_VLD_DET_EVNT)) {
+ otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+ if (otg_events & OEVT_B_SES_VLD_EVT) {
+ otg_dbg(otg, "Session valid\n");
+ return OTG_STATE_B_PERIPHERAL;
+
+ } else {
+ otg_dbg(otg, "Session not valid\n");
+ goto again;
+ }
+ } else if (user_events & USER_SRP_EVENT) {
+ otg_dbg(otg, "USER_SRP_EVENT\n");
+ return OTG_STATE_B_SRP_INIT;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
+static enum usb_otg_state do_b_srp_init(struct dwc3_otg *otg)
+{
+ int rc;
+ u32 otg_mask;
+ u32 events = 0;
+
+ otg_dbg(otg, "");
+ otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+ OEVT_B_DEV_SES_VLD_DET_EVNT |
+ OEVT_B_DEV_VBUS_CHNG_EVNT;
+
+ otg_write(otg, OEVTEN, otg_mask);
+ start_srp(otg);
+
+ rc = SRP_TIMEOUT;
+
+again:
+ rc = sleep_until_event(otg,
+ otg_mask, 0,
+ &events, NULL, rc);
+ if (rc < 0)
+ return OTG_STATE_UNDEFINED;
+
+ if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+ otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+ return OTG_STATE_UNDEFINED;
+ } else if (events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+ otg_dbg(otg, "OEVT_B_DEV_SES_VLD_DET_EVNT\n");
+ return OTG_STATE_B_PERIPHERAL;
+ } else if (rc == 0) {
+ otg_dbg(otg, "SRP Timeout (rc=%d)\n", rc);
+ otg_info(otg, "DEVICE NO RESPONSE FOR SRP\n");
+ return OTG_STATE_B_IDLE;
+
+ } else {
+ goto again;
+ }
+
+ return OTG_STATE_UNDEFINED;
+}
+
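+/*
+ * Main OTG state machine, run as a kthread. Each iteration dispatches
+ * the do_*() handler for the current otg->otg.state and commits the
+ * state it returns; events posted by the IRQ handler and the sysfs
+ * hooks drive the transitions.
+ */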
+int otg_main_thread(void *data)
+{
+ struct dwc3_otg *otg = (struct dwc3_otg *)data;
+ enum usb_otg_state prev = OTG_STATE_UNDEFINED;
+
+#ifdef VERBOSE_DEBUG
+ u32 snpsid = otg_read(otg, 0xc120);
+
+ otg_vdbg(otg, "io_priv=%p\n", otg->regs);
+ otg_vdbg(otg, "c120: %x\n", snpsid);
+#endif
+
+ /* Allow the thread to be killed by a signal, but set the signal mask
+ * to block everything but INT, TERM, KILL, and USR1.
+ */
+ allow_signal(SIGINT);
+ allow_signal(SIGTERM);
+ allow_signal(SIGKILL);
+ allow_signal(SIGUSR1);
+
+ /* Allow the thread to be frozen */
+ set_freezable();
+
+ /* Allow host/peripheral driver load to finish */
+ msleep(100);
+
+ reset_hw(otg);
+
+ stop_host(otg);
+ stop_peripheral(otg);
+
+ otg_dbg(otg, "Thread running\n");
+ while (1) {
+ enum usb_otg_state next = OTG_STATE_UNDEFINED;
+
+ otg_vdbg(otg, "Main thread entering state\n");
+
+ switch (otg->otg.state) {
+ case OTG_STATE_UNDEFINED:
+ otg_dbg(otg, "OTG_STATE_UNDEFINED\n");
+ next = do_connector_id_status(otg);
+ break;
+
+ case OTG_STATE_A_IDLE:
+ otg_dbg(otg, "OTG_STATE_A_IDLE\n");
+ stop_peripheral(otg);
+
+ if (prev == OTG_STATE_UNDEFINED)
+ next = OTG_STATE_A_WAIT_VRISE;
+ else
+ next = do_a_idle(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_VRISE:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_VRISE\n");
+ next = do_a_wait_vrise(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_BCON:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_BCON\n");
+ next = do_a_wait_bconn(otg);
+ break;
+
+ case OTG_STATE_A_HOST:
+ otg_dbg(otg, "OTG_STATE_A_HOST\n");
+ stop_peripheral(otg);
+ next = do_a_host(otg);
+ /* Don't stop the host here if we are going into
+ * A_SUSPEND. We need to delay that until later. It
+ * will be stopped when coming out of A_SUSPEND
+ * state.
+ */
+ if (next != OTG_STATE_A_SUSPEND)
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ otg_dbg(otg, "OTG_STATE_A_SUSPEND\n");
+ next = do_a_hnp_init(otg);
+
+ /* Stop the host. */
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_VFALL:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_VFALL\n");
+ next = do_a_wait_vfall(otg);
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_PERIPHERAL:
+ otg_dbg(otg, "OTG_STATE_A_PERIPHERAL\n");
+ stop_host(otg);
+ start_peripheral(otg);
+ next = do_a_peripheral(otg);
+ stop_peripheral(otg);
+ break;
+
+ case OTG_STATE_B_IDLE:
+ otg_dbg(otg, "OTG_STATE_B_IDLE\n");
+ next = do_b_idle(otg);
+ break;
+
+ case OTG_STATE_B_PERIPHERAL:
+ otg_dbg(otg, "OTG_STATE_B_PERIPHERAL\n");
+ stop_host(otg);
+ start_peripheral(otg);
+ next = do_b_peripheral(otg);
+ stop_peripheral(otg);
+ break;
+
+ case OTG_STATE_B_SRP_INIT:
+ otg_dbg(otg, "OTG_STATE_B_SRP_INIT\n");
+ otg_read(otg, OSTS);
+ next = do_b_srp_init(otg);
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ otg_dbg(otg, "OTG_STATE_B_WAIT_ACON\n");
+ next = do_b_wait_acon(otg);
+ break;
+
+ case OTG_STATE_B_HOST:
+ otg_dbg(otg, "OTG_STATE_B_HOST\n");
+ next = do_b_host(otg);
+ stop_host(otg);
+ break;
+
+ default:
+ otg_err(otg, "Unknown state %d, sleeping...\n",
+				otg->otg.state);
+ sleep_main_thread(otg);
+ break;
+ }
+
+ prev = otg->otg.state;
+ otg->otg.state = next;
+ if (kthread_should_stop())
+ break;
+ }
+
+ otg->main_thread = NULL;
+ otg_dbg(otg, "OTG main thread exiting....\n");
+
+ return 0;
+}
+
+static void start_main_thread(struct dwc3_otg *otg)
+{
+	if (!otg->main_thread && otg->otg.gadget && otg->otg.host) {
+		otg_dbg(otg, "Starting OTG main thread\n");
+		otg->main_thread = kthread_create(otg_main_thread, otg, "otg");
+		if (IS_ERR(otg->main_thread)) {
+			otg_err(otg, "Failed to create OTG main thread\n");
+			otg->main_thread = NULL;
+			return;
+		}
+		wake_up_process(otg->main_thread);
+	}
+}
+
+static inline struct dwc3_otg *otg_to_dwc3_otg(struct usb_otg *x)
+{
+ return container_of(x, struct dwc3_otg, otg);
+}
+
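+/*
+ * OTG interrupt handler: snapshot and clear OEVT, log the event bits,
+ * then forward any enabled events to the main thread for handling in
+ * process context.
+ */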
+static irqreturn_t dwc3_otg_irq(int irq, void *_otg)
+{
+ struct dwc3_otg *otg;
+ u32 oevt;
+ u32 osts;
+ u32 octl;
+ u32 ocfg;
+ u32 oevten;
+ u32 otg_mask = OEVT_ALL;
+
+	if (!_otg)
+		return IRQ_NONE;
+
+ otg = (struct dwc3_otg *)_otg;
+
+ oevt = otg_read(otg, OEVT);
+ osts = otg_read(otg, OSTS);
+ octl = otg_read(otg, OCTL);
+ ocfg = otg_read(otg, OCFG);
+ oevten = otg_read(otg, OEVTEN);
+
+ /* Clear handled events */
+ otg_write(otg, OEVT, oevt);
+
+ otg_vdbg(otg, "\n");
+ otg_vdbg(otg, " oevt = %08x\n", oevt);
+ otg_vdbg(otg, " osts = %08x\n", osts);
+ otg_vdbg(otg, " octl = %08x\n", octl);
+ otg_vdbg(otg, " ocfg = %08x\n", ocfg);
+ otg_vdbg(otg, " oevten = %08x\n", oevten);
+
+ otg_vdbg(otg, "oevt[DeviceMode] = %s\n",
+ oevt & OEVT_DEV_MOD_EVNT ? "Device" : "Host");
+
+ if (oevt & OEVT_CONN_ID_STS_CHNG_EVNT)
+ otg_dbg(otg, "Connector ID Status Change Event\n");
+ if (oevt & OEVT_HOST_ROLE_REQ_INIT_EVNT)
+ otg_dbg(otg, "Host Role Request Init Notification Event\n");
+ if (oevt & OEVT_HOST_ROLE_REQ_CONFIRM_EVNT)
+ otg_dbg(otg, "Host Role Request Confirm Notification Event\n");
+ if (oevt & OEVT_A_DEV_B_DEV_HOST_END_EVNT)
+ otg_dbg(otg, "A-Device B-Host End Event\n");
+ if (oevt & OEVT_A_DEV_HOST_EVNT)
+ otg_dbg(otg, "A-Device Host Event\n");
+ if (oevt & OEVT_A_DEV_HNP_CHNG_EVNT)
+ otg_dbg(otg, "A-Device HNP Change Event\n");
+ if (oevt & OEVT_A_DEV_SRP_DET_EVNT)
+ otg_dbg(otg, "A-Device SRP Detect Event\n");
+ if (oevt & OEVT_A_DEV_SESS_END_DET_EVNT)
+ otg_dbg(otg, "A-Device Session End Detected Event\n");
+ if (oevt & OEVT_B_DEV_B_HOST_END_EVNT)
+ otg_dbg(otg, "B-Device B-Host End Event\n");
+ if (oevt & OEVT_B_DEV_HNP_CHNG_EVNT)
+ otg_dbg(otg, "B-Device HNP Change Event\n");
+ if (oevt & OEVT_B_DEV_SES_VLD_DET_EVNT)
+ otg_dbg(otg, "B-Device Session Valid Detect Event\n");
+ if (oevt & OEVT_B_DEV_VBUS_CHNG_EVNT)
+ otg_dbg(otg, "B-Device VBUS Change Event\n");
+
+ if (oevt & otg_mask) {
+ /* Pass event to main thread */
+ spin_lock(&otg->lock);
+ otg->otg_events |= oevt;
+ wakeup_main_thread(otg);
+ spin_unlock(&otg->lock);
+		return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
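+/*
+ * Poll the connected B-device's OTG status (GET_STATUS with index
+ * 0xf000) about once a second; once the device requests the host role,
+ * enable HNP and suspend the port so the roles can be swapped.
+ */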
+static void hnp_polling_work(struct work_struct *w)
+{
+ struct dwc3_otg *otg = container_of(w, struct dwc3_otg,
+ hp_work.work);
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ u8 *otgstatus;
+ int ret;
+ int err;
+
+	if (!otg->otg.host)
+		return;
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return;
+
+ udev = usb_hub_find_child(bus->root_hub, bus->otg_port);
+ if (!udev)
+ return;
+
+ otgstatus = kmalloc(sizeof(*otgstatus), GFP_NOIO);
+ if (!otgstatus)
+ return;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE,
+ 0, 0xf000, otgstatus, sizeof(*otgstatus),
+ USB_CTRL_GET_TIMEOUT);
+
+ if (ret == sizeof(*otgstatus) && (*otgstatus & 0x1)) {
+ /* enable HNP before suspend, it's simpler */
+
+ udev->bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ udev->bus->b_hnp_enable
+ ? USB_DEVICE_B_HNP_ENABLE
+ : USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+
+ if (err < 0) {
+			/*
+			 * OTG MESSAGE: report errors here; customize to
+			 * match your product.
+			 */
+			otg_info(otg, "ERROR: device did not respond\n");
+ dev_info(&udev->dev, "can't set HNP mode: %d\n",
+ err);
+ udev->bus->b_hnp_enable = 0;
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x1a0a) {
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND)
+ < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n",
+ err);
+ }
+ } else {
+ /* Device wants role-switch, suspend the bus. */
+ static struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ otg_start_hnp(phy->otg);
+ usb_put_phy(phy);
+
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND) < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ }
+ } else if (ret < 0) {
+ udev->bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND) < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ } else {
+ schedule_delayed_work(&otg->hp_work, 1 * HZ);
+ }
+
+ kfree(otgstatus);
+}
+
+static int dwc3_otg_notify_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ struct dwc3_otg *otg;
+ int err = 0;
+
+ otg = otg_to_dwc3_otg(phy->otg);
+
+	if (!phy->otg->host)
+		return -EINVAL;
+
+	hcd = container_of(phy->otg->host, struct usb_hcd, self);
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return 0;
+
+ udev = usb_hub_find_child(bus->root_hub, bus->otg_port);
+ if (!udev)
+ return 0;
+
+ /*
+ * OTG-aware devices on OTG-capable root hubs may be able to use SRP,
+ * to wake us after we've powered off VBUS; and HNP, switching roles
+ * "host" to "peripheral". The OTG descriptor helps figure this out.
+ */
+ if (udev->config && udev->parent == udev->bus->root_hub) {
+ struct usb_otg20_descriptor *desc = NULL;
+
+ /* descriptor may appear anywhere in config */
+ err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc);
+ if (err || !(desc->bmAttributes & USB_OTG_HNP))
+ return 0;
+
+ if (udev->portnum == udev->bus->otg_port) {
+ INIT_DELAYED_WORK(&otg->hp_work,
+ hnp_polling_work);
+ schedule_delayed_work(&otg->hp_work, HZ);
+ }
+
+ }
+
+ return err;
+}
+
+static int dwc3_otg_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct dwc3_otg *otg;
+
+ otg = otg_to_dwc3_otg(phy->otg);
+
+ if (work_pending(&otg->hp_work.work)) {
+ while (!cancel_delayed_work(&otg->hp_work))
+ msleep(20);
+ }
+ return 0;
+}
+
+void dwc3_otg_set_peripheral(struct usb_otg *_otg, int yes)
+{
+ struct dwc3_otg *otg;
+
+ if (!_otg)
+ return;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if (yes) {
+ if (otg->hwparams6 == 0xdeadbeef)
+ otg->hwparams6 = otg_read(otg, GHWPARAMS6);
+ stop_host(otg);
+ } else {
+ stop_peripheral(otg);
+ }
+
+ set_peri_mode(otg, yes);
+}
+EXPORT_SYMBOL(dwc3_otg_set_peripheral);
+
+static int dwc3_otg_set_periph(struct usb_otg *_otg, struct usb_gadget *gadget)
+{
+ struct dwc3_otg *otg;
+
+ if (!_otg)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if ((long)gadget == 1) {
+ dwc3_otg_set_peripheral(_otg, 1);
+ return 0;
+ }
+
+ if (!gadget) {
+ otg->otg.gadget = NULL;
+ return -ENODEV;
+ }
+
+ otg->otg.gadget = gadget;
+ otg->otg.gadget->hnp_polling_support = 1;
+ otg->otg.state = OTG_STATE_B_IDLE;
+
+ start_main_thread(otg);
+ return 0;
+}
+
+static int dwc3_otg_set_host(struct usb_otg *_otg, struct usb_bus *host)
+{
+ struct dwc3_otg *otg;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ if (!_otg)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if (host == (struct usb_bus *)0xdeadbeef) {
+ dwc3_otg_set_peripheral(_otg, 0);
+ return 0;
+ }
+
+ if (!host) {
+ otg->otg.host = NULL;
+ otg->hcd_irq = 0;
+ return -ENODEV;
+ }
+
+ hcd = container_of(host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ otg_dbg(otg, "hcd=%p xhci=%p\n", hcd, xhci);
+
+ hcd->self.otg_port = 1;
+ if (xhci->shared_hcd) {
+ xhci->shared_hcd->self.otg_port = 1;
+ otg_dbg(otg, "shared_hcd=%p\n", xhci->shared_hcd);
+ }
+
+ otg->otg.host = host;
+ otg->hcd_irq = hcd->irq;
+ otg_dbg(otg, "host=%p irq=%d\n", otg->otg.host, otg->hcd_irq);
+
+ otg->host_started = 1;
+ otg->dev_enum = 0;
+ start_main_thread(otg);
+ return 0;
+}
+
+static int dwc3_otg_start_srp(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_SRP_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+static int dwc3_otg_start_hnp(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_HNP_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+static int dwc3_otg_end_session(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_END_SESSION;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+int otg_end_session(struct usb_otg *otg)
+{
+	return dwc3_otg_end_session(otg);
+}
+EXPORT_SYMBOL(otg_end_session);
+
+static int dwc3_otg_received_host_release(struct usb_otg *x)
+{
+ struct dwc3_otg *otg;
+ unsigned long flags;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= PCD_RECEIVED_HOST_RELEASE_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+int otg_host_release(struct usb_otg *otg)
+{
+ return dwc3_otg_received_host_release(otg);
+}
+EXPORT_SYMBOL(otg_host_release);
+
+static void dwc3_otg_enable_irq(struct dwc3_otg *otg)
+{
+ u32 reg;
+
+ /* Enable OTG IRQs */
+ reg = OEVT_ALL;
+
+ otg_write(otg, OEVTEN, reg);
+}
+
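+/*
+ * Sysfs hooks for driving the OTG protocol from user space, e.g.
+ *	echo 1 > /sys/devices/.../srp
+ * The written value is ignored; the write itself triggers the event.
+ */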
+static ssize_t store_srp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (IS_ERR(phy) || !phy) {
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg_start_srp(otg);
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(srp, 0220, NULL, store_srp);
+
+static ssize_t store_end(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (IS_ERR(phy) || !phy) {
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg_end_session(otg);
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(end, 0220, NULL, store_end);
+
+static ssize_t store_hnp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ struct usb_otg *otg;
+
+ dev_dbg(dwc->dev, "%s()\n", __func__);
+
+ if (IS_ERR(phy) || !phy) {
+ dev_info(dwc->dev, "NO PHY!!\n");
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ dev_info(dwc->dev, "NO OTG!!\n");
+ usb_put_phy(phy);
+ return count;
+ }
+
+ dev_info(dev, "b_hnp_enable is FALSE\n");
+ dwc->gadget.host_request_flag = 1;
+
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(hnp, 0220, NULL, store_hnp);
+
+static ssize_t store_hnp_end(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ unsigned long flags;
+ struct dwc3_otg *dwc_otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (IS_ERR(phy) || !phy) {
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ dwc_otg = otg_to_dwc3_otg(otg);
+
+ spin_lock_irqsave(&dwc_otg->lock, flags);
+ dwc_otg->user_events |= USER_HNP_END_SESSION;
+ wakeup_main_thread(dwc_otg);
+ spin_unlock_irqrestore(&dwc_otg->lock, flags);
+
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(hnp_end, 0220, NULL, store_hnp_end);
+
+static ssize_t store_a_hnp_reqd(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_otg *otg;
+
+ otg = dwc->otg;
+ host_release(otg);
+ return count;
+}
+static DEVICE_ATTR(a_hnp_reqd, 0220, NULL, store_a_hnp_reqd);
+
+static ssize_t store_print_dbg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_otg *otg;
+
+ otg = dwc->otg;
+ print_debug_regs(otg);
+
+ return count;
+}
+static DEVICE_ATTR(print_dbg, 0220, NULL, store_print_dbg);
+
+void dwc_usb3_remove_dev_files(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_print_dbg);
+ device_remove_file(dev, &dev_attr_a_hnp_reqd);
+ device_remove_file(dev, &dev_attr_end);
+ device_remove_file(dev, &dev_attr_srp);
+ device_remove_file(dev, &dev_attr_hnp);
+ device_remove_file(dev, &dev_attr_hnp_end);
+}
+
+int dwc3_otg_create_dev_files(struct device *dev)
+{
+ int retval;
+
+ retval = device_create_file(dev, &dev_attr_hnp);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_hnp_end);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_srp);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_end);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_a_hnp_reqd);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_print_dbg);
+ if (retval)
+ goto fail;
+
+ return 0;
+
+fail:
+ dev_err(dev, "Failed to create one or more sysfs files!!\n");
+ return retval;
+}
+
+void dwc3_otg_init(struct dwc3 *dwc)
+{
+ struct dwc3_otg *otg;
+ int err;
+ u32 reg;
+
+ dev_dbg(dwc->dev, "dwc3_otg_init\n");
+
+ /*
+ * GHWPARAMS6[10] bit is SRPSupport.
+ * This bit also reflects DWC_USB3_EN_OTG
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
+ if (!(reg & GHWPARAMS6_SRP_SUPPORT_ENABLED)) {
+		/*
+		 * No OTG support in the HW core. This is not an error:
+		 * simply continue probing the dwc3 driver without OTG.
+		 */
+ dev_dbg(dwc->dev, "dwc3_otg address space is not supported\n");
+ return;
+ }
+
+	otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+	if (!otg) {
+		dev_err(dwc->dev, "failed to allocate memory\n");
+		return;
+	}
+
+ dwc->otg = otg;
+ otg->dev = dwc->dev;
+ otg->dwc = dwc;
+
+ otg->regs = dwc->regs - DWC3_GLOBALS_REGS_START;
+	otg->otg.usb_phy = kzalloc(sizeof(struct usb_phy), GFP_KERNEL);
+	if (!otg->otg.usb_phy) {
+		dwc->otg = NULL;
+		kfree(otg);
+		return;
+	}
+	otg->otg.usb_phy->dev = otg->dev;
+ otg->otg.usb_phy->label = "dwc3_otg";
+ otg->otg.state = OTG_STATE_UNDEFINED;
+ otg->otg.usb_phy->otg = &otg->otg;
+ otg->otg.usb_phy->notify_connect = dwc3_otg_notify_connect;
+ otg->otg.usb_phy->notify_disconnect = dwc3_otg_notify_disconnect;
+
+ otg->otg.start_srp = dwc3_otg_start_srp;
+ otg->otg.start_hnp = dwc3_otg_start_hnp;
+ otg->otg.set_host = dwc3_otg_set_host;
+ otg->otg.set_peripheral = dwc3_otg_set_periph;
+
+ otg->hwparams6 = reg;
+ otg->state = OTG_STATE_UNDEFINED;
+
+ spin_lock_init(&otg->lock);
+ init_waitqueue_head(&otg->main_wq);
+
+ err = usb_add_phy(otg->otg.usb_phy, USB_PHY_TYPE_USB3);
+ if (err) {
+ dev_err(otg->dev, "can't register transceiver, err: %d\n",
+ err);
+ goto exit;
+ }
+
+ otg->irq = platform_get_irq(to_platform_device(otg->dev), 1);
+
+ dwc3_otg_create_dev_files(otg->dev);
+
+ /* Set irq handler */
+ err = request_irq(otg->irq, dwc3_otg_irq, IRQF_SHARED, "dwc3_otg", otg);
+ if (err) {
+ dev_err(otg->otg.usb_phy->dev, "failed to request irq #%d --> %d\n",
+ otg->irq, err);
+ goto exit;
+ }
+
+ dwc3_otg_enable_irq(otg);
+
+ err = dwc3_gadget_init(dwc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(otg->otg.usb_phy->dev,
+ "failed to initialize gadget\n");
+ goto exit;
+ }
+
+ err = dwc3_host_init(dwc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(otg->otg.usb_phy->dev,
+ "failed to initialize host\n");
+ goto exit;
+ }
+
+ return;
+
+exit:
+	dwc->otg = NULL;
+	kfree(otg->otg.usb_phy);
+	kfree(otg);
+}
+
+void dwc3_otg_exit(struct dwc3 *dwc)
+{
+ struct dwc3_otg *otg = dwc->otg;
+
+ otg_dbg(otg, "\n");
+ usb_remove_phy(otg->otg.usb_phy);
+ kfree(otg->otg.usb_phy);
+ kfree(otg);
+}
diff --git a/drivers/usb/dwc3/otg.h b/drivers/usb/dwc3/otg.h
new file mode 100644
index 000000000000..25de1e5631a7
--- /dev/null
+++ b/drivers/usb/dwc3/otg.h
@@ -0,0 +1,252 @@
+/**
+ * otg.h - DesignWare USB3 DRD OTG Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define otg_dbg(d, fmt, args...) dev_dbg((d)->dev, "%s(): " fmt,\
+ __func__, ## args)
+#define otg_vdbg(d, fmt, args...) dev_vdbg((d)->dev, "%s(): " fmt,\
+ __func__, ## args)
+#define otg_err(d, fmt, args...) dev_err((d)->dev, "%s(): ERROR: " fmt,\
+ __func__, ## args)
+#define otg_warn(d, fmt, args...) dev_warn((d)->dev, "%s(): WARN: " fmt,\
+ __func__, ## args)
+#define otg_info(d, fmt, args...) dev_info((d)->dev, "%s(): INFO: " fmt,\
+ __func__, ## args)
+
+#ifdef VERBOSE_DEBUG
+#define otg_write(o, reg, val) do { \
+ otg_vdbg(o, "OTG_WRITE: reg=0x%05x, val=0x%08x\n", reg, val); \
+ writel(val, ((void *)((o)->regs)) + reg); \
+ } while (0)
+
+#define otg_read(o, reg) ({ \
+ u32 __r = readl(((void *)((o)->regs)) + reg); \
+ otg_vdbg(o, "OTG_READ: reg=0x%05x, val=0x%08x\n", reg, __r); \
+ __r; \
+ })
+#else
+#define otg_write(o, reg, val) writel(val, ((void *)((o)->regs)) + reg)
+#define otg_read(o, reg) readl(((void *)((o)->regs)) + reg)
+#endif
+
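+/*
+ * Sleep until @condition becomes true or @msecs elapse, re-checking the
+ * condition each time the main thread is woken. Evaluates to the time
+ * remaining (<= 0 on timeout).
+ */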
+#define sleep_main_thread_until_condition_timeout(otg, condition, msecs) ({ \
+ int __timeout = msecs; \
+ while (!(condition)) { \
+ otg_dbg(otg, " ... sleeping for %d\n", __timeout); \
+ __timeout = sleep_main_thread_timeout(otg, __timeout); \
+ if (__timeout <= 0) { \
+ break; \
+ } \
+ } \
+ __timeout; \
+ })
+
+#define sleep_main_thread_until_condition(otg, condition) ({ \
+ int __rc; \
+ do { \
+ __rc = sleep_main_thread_until_condition_timeout(otg, \
+ condition, 50000); \
+ } while (__rc == 0); \
+ __rc; \
+ })
+
+#define GHWPARAMS6 0xc158
+#define GHWPARAMS6_SRP_SUPPORT_ENABLED 0x0400
+#define GHWPARAMS6_HNP_SUPPORT_ENABLED 0x0800
+
+#define GCTL 0xc110
+#define GCTL_PRT_CAP_DIR 0x3000
+#define GCTL_PRT_CAP_DIR_SHIFT 12
+#define GCTL_PRT_CAP_DIR_HOST 1
+#define GCTL_PRT_CAP_DIR_DEV 2
+#define GCTL_PRT_CAP_DIR_OTG 3
+#define GCTL_GBL_HIBERNATION_EN 0x2
+
+#define OCFG 0xcc00
+#define OCFG_SRP_CAP 0x01
+#define OCFG_SRP_CAP_SHIFT 0
+#define OCFG_HNP_CAP 0x02
+#define OCFG_HNP_CAP_SHIFT 1
+#define OCFG_OTG_VERSION 0x04
+#define OCFG_OTG_VERSION_SHIFT 2
+
+#define OCTL 0xcc04
+#define OCTL_HST_SET_HNP_EN 0x01
+#define OCTL_HST_SET_HNP_EN_SHIFT 0
+#define OCTL_DEV_SET_HNP_EN 0x02
+#define OCTL_DEV_SET_HNP_EN_SHIFT 1
+#define OCTL_TERM_SEL_DL_PULSE 0x04
+#define OCTL_TERM_SEL_DL_PULSE_SHIFT 2
+#define OCTL_SES_REQ 0x08
+#define OCTL_SES_REQ_SHIFT 3
+#define OCTL_HNP_REQ 0x10
+#define OCTL_HNP_REQ_SHIFT 4
+#define OCTL_PRT_PWR_CTL 0x20
+#define OCTL_PRT_PWR_CTL_SHIFT 5
+#define OCTL_PERI_MODE 0x40
+#define OCTL_PERI_MODE_SHIFT 6
+
+#define OEVT 0xcc08
+#define OEVT_ERR 0x00000001
+#define OEVT_ERR_SHIFT 0
+#define OEVT_SES_REQ_SCS 0x00000002
+#define OEVT_SES_REQ_SCS_SHIFT 1
+#define OEVT_HST_NEG_SCS 0x00000004
+#define OEVT_HST_NEG_SCS_SHIFT 2
+#define OEVT_B_SES_VLD_EVT 0x00000008
+#define OEVT_B_SES_VLD_EVT_SHIFT 3
+#define OEVT_B_DEV_VBUS_CHNG_EVNT 0x00000100
+#define OEVT_B_DEV_VBUS_CHNG_EVNT_SHIFT 8
+#define OEVT_B_DEV_SES_VLD_DET_EVNT 0x00000200
+#define OEVT_B_DEV_SES_VLD_DET_EVNT_SHIFT 9
+#define OEVT_B_DEV_HNP_CHNG_EVNT 0x00000400
+#define OEVT_B_DEV_HNP_CHNG_EVNT_SHIFT 10
+#define OEVT_B_DEV_B_HOST_END_EVNT 0x00000800
+#define OEVT_B_DEV_B_HOST_END_EVNT_SHIFT 11
+#define OEVT_A_DEV_SESS_END_DET_EVNT 0x00010000
+#define OEVT_A_DEV_SESS_END_DET_EVNT_SHIFT 16
+#define OEVT_A_DEV_SRP_DET_EVNT 0x00020000
+#define OEVT_A_DEV_SRP_DET_EVNT_SHIFT 17
+#define OEVT_A_DEV_HNP_CHNG_EVNT 0x00040000
+#define OEVT_A_DEV_HNP_CHNG_EVNT_SHIFT 18
+#define OEVT_A_DEV_HOST_EVNT 0x00080000
+#define OEVT_A_DEV_HOST_EVNT_SHIFT 19
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT 0x00100000
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT_SHIFT 20
+#define OEVT_A_DEV_IDLE_EVNT 0x00200000
+#define OEVT_A_DEV_IDLE_EVNT_SHIFT 21
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT 0x00400000
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT_SHIFT 22
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT 0x00800000
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT_SHIFT 23
+#define OEVT_CONN_ID_STS_CHNG_EVNT 0x01000000
+#define OEVT_CONN_ID_STS_CHNG_EVNT_SHIFT 24
+#define OEVT_DEV_MOD_EVNT 0x80000000
+#define OEVT_DEV_MOD_EVNT_SHIFT 31
+
+#define OEVTEN 0xcc0c
+
+#define OEVT_ALL (OEVT_CONN_ID_STS_CHNG_EVNT | \
+ OEVT_HOST_ROLE_REQ_INIT_EVNT | \
+ OEVT_HOST_ROLE_REQ_CONFIRM_EVNT | \
+ OEVT_A_DEV_B_DEV_HOST_END_EVNT | \
+ OEVT_A_DEV_HOST_EVNT | \
+ OEVT_A_DEV_HNP_CHNG_EVNT | \
+ OEVT_A_DEV_SRP_DET_EVNT | \
+ OEVT_A_DEV_SESS_END_DET_EVNT | \
+ OEVT_B_DEV_B_HOST_END_EVNT | \
+ OEVT_B_DEV_HNP_CHNG_EVNT | \
+ OEVT_B_DEV_SES_VLD_DET_EVNT | \
+ OEVT_B_DEV_VBUS_CHNG_EVNT)
+
+#define OSTS 0xcc10
+#define OSTS_CONN_ID_STS 0x0001
+#define OSTS_CONN_ID_STS_SHIFT 0
+#define OSTS_A_SES_VLD 0x0002
+#define OSTS_A_SES_VLD_SHIFT 1
+#define OSTS_B_SES_VLD 0x0004
+#define OSTS_B_SES_VLD_SHIFT 2
+#define OSTS_XHCI_PRT_PWR 0x0008
+#define OSTS_XHCI_PRT_PWR_SHIFT 3
+#define OSTS_PERIP_MODE 0x0010
+#define OSTS_PERIP_MODE_SHIFT 4
+#define OSTS_OTG_STATES 0x0f00
+#define OSTS_OTG_STATE_SHIFT 8
+
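The register fields above come as mask/shift pairs, so a field is read by masking and then shifting. For example (illustrative helper, not part of this patch):

	/* Decode the OTG state field (OSTS bits 11:8). */
	static inline u32 dwc3_otg_get_state(u32 osts)
	{
		return (osts & OSTS_OTG_STATES) >> OSTS_OTG_STATE_SHIFT;
	}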
+#define DCTL 0xc704
+#define DCTL_RUN_STOP 0x80000000
+
+#define OTG_STATE_INVALID -1
+#define OTG_STATE_EXIT 14
+#define OTG_STATE_TERMINATED 15
+
+#define PERI_MODE_HOST 0
+#define PERI_MODE_PERIPHERAL 1
+
+/** The main structure to keep track of OTG driver state. */
+struct dwc3_otg {
+ /** OTG PHY */
+ struct usb_otg otg;
+ struct device *dev;
+ struct dwc3 *dwc;
+
+ void __iomem *regs;
+
+ int main_wakeup_needed;
+ struct task_struct *main_thread;
+ wait_queue_head_t main_wq;
+
+ spinlock_t lock;
+
+ int otg_srp_reqd;
+
+ /* Events */
+ u32 otg_events;
+
+ u32 user_events;
+
+ /** User initiated SRP.
+ *
+ * Valid in B-device during sensing/probing. Initiates SRP signalling
+ * across the bus.
+ *
+	 * Also valid in an A-device during probing. This causes the A-device
+	 * to apply VBUS manually and check for a device. Can be used if the
+ * device does not support SRP and the host does not support ADP.
+ */
+#define USER_SRP_EVENT 0x1
+ /** User initiated HNP (only valid in B-peripheral) */
+#define USER_HNP_EVENT 0x2
+ /** User has ended the session (only valid in B-peripheral) */
+#define USER_END_SESSION 0x4
+	/** User initiated VBUS. This causes the A-device to turn on VBUS and
+	 * check whether a device connects (only valid in an A-device during
+	 * sensing/probing).
+ */
+#define USER_VBUS_ON 0x8
+ /** User has initiated RSP */
+#define USER_RSP_EVENT 0x10
+ /** Host release event */
+#define PCD_RECEIVED_HOST_RELEASE_EVENT 0x20
+ /** Initial SRP */
+#define INITIAL_SRP 0x40
+ /** A-device connected event*/
+#define USER_A_CONN_EVENT 0x80
+	/** User initiated HNP END session. This makes the A-device and
+	 * B-device return to the roles they had before HNP was
+	 * initiated.
+ */
+#define USER_HNP_END_SESSION 0x100
+
+ /* States */
+ enum usb_otg_state prev;
+ enum usb_otg_state state;
+
+ u32 hwparams6;
+ int hcd_irq;
+ int irq;
+ int host_started;
+ int peripheral_started;
+ int dev_enum;
+
+	struct delayed_work hp_work; /* drives HNP polling */
+};
+
+extern int usb_port_suspend(struct usb_device *udev, pm_message_t msg);
+extern void usb_kick_hub_wq(struct usb_device *dev);
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
new file mode 100644
index 000000000000..ae659e367804
--- /dev/null
+++ b/drivers/usb/dwc3/platform_data.h
@@ -0,0 +1,54 @@
+/**
+ * platform_data.h - USB DWC3 Platform Data Support
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+
+struct dwc3_platform_data {
+ enum usb_device_speed maximum_speed;
+ enum usb_dr_mode dr_mode;
+ bool usb3_lpm_capable;
+
+ unsigned is_utmi_l1_suspend:1;
+ u8 hird_threshold;
+
+ u8 lpm_nyet_threshold;
+
+ unsigned disable_scramble_quirk:1;
+ unsigned has_lpm_erratum:1;
+ unsigned u2exit_lfps_quirk:1;
+ unsigned u2ss_inp3_quirk:1;
+ unsigned req_p1p2p3_quirk:1;
+ unsigned del_p1p2p3_quirk:1;
+ unsigned del_phy_power_chg_quirk:1;
+ unsigned lfps_filter_quirk:1;
+ unsigned rx_detect_poll_quirk:1;
+ unsigned dis_u3_susphy_quirk:1;
+ unsigned dis_u2_susphy_quirk:1;
+ unsigned dis_enblslpm_quirk:1;
+ unsigned dis_rxdet_inp3_quirk:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
+
+ u32 fladj_value;
+ bool refclk_fladj;
+
+ const char *hsphy_interface;
+};
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a7709d126b29..452fd36f9c10 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -231,6 +231,47 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
return len;
}
+static ssize_t gadget_dev_desc_max_speed_show(struct config_item *item,
+ char *page)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+ enum usb_device_speed max_speed = gi->composite.gadget_driver.max_speed;
+
+ return sprintf(page, "%s\n", usb_speed_string(max_speed));
+}
+
+static ssize_t gadget_dev_desc_max_speed_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = to_gadget_info(item);
+ char *name;
+ int ret;
+ const char * const speed_names[] = {
+ [USB_SPEED_UNKNOWN] = "UNKNOWN",
+ [USB_SPEED_LOW] = "low-speed",
+ [USB_SPEED_FULL] = "full-speed",
+ [USB_SPEED_HIGH] = "high-speed",
+ [USB_SPEED_WIRELESS] = "wireless",
+ [USB_SPEED_SUPER] = "super-speed",
+ [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
+ };
+
+	name = kstrdup(page, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+	if (len && name[len - 1] == '\n')
+		name[len - 1] = '\0';
+
+	ret = match_string(speed_names, ARRAY_SIZE(speed_names), name);
+	kfree(name);
+
+	if (ret >= 0) {
+		gi->composite.gadget_driver.max_speed = ret;
+		return len;
+	}
+
+	return ret;
+}
+
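For reference, match_string() returns the index of the matching array entry (or -EINVAL on a miss), so the index doubles as the enum usb_device_speed value here. A toy illustration (not part of the patch):

	static const char * const names[] = { "low-speed", "full-speed" };
	int idx = match_string(names, ARRAY_SIZE(names), "full-speed");
	/* idx == 1; an unmatched string would yield -EINVAL */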
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
@@ -304,6 +345,7 @@ CONFIGFS_ATTR(gadget_dev_desc_, idVendor);
CONFIGFS_ATTR(gadget_dev_desc_, idProduct);
CONFIGFS_ATTR(gadget_dev_desc_, bcdDevice);
CONFIGFS_ATTR(gadget_dev_desc_, bcdUSB);
+CONFIGFS_ATTR(gadget_dev_desc_, max_speed);
CONFIGFS_ATTR(gadget_dev_desc_, UDC);
static struct configfs_attribute *gadget_root_attrs[] = {
@@ -315,6 +357,7 @@ static struct configfs_attribute *gadget_root_attrs[] = {
&gadget_dev_desc_attr_idProduct,
&gadget_dev_desc_attr_bcdDevice,
&gadget_dev_desc_attr_bcdUSB,
+ &gadget_dev_desc_attr_max_speed,
&gadget_dev_desc_attr_UDC,
NULL,
};
@@ -1545,7 +1588,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = gi->composite.gadget_driver.max_speed;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7f01f78b1d23..6ca4fc3b3a3b 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -43,14 +43,17 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
/* Start bot.c code */
+static struct usbg_cdb *acquire_cmd_request(struct f_uas *fu);
+static void release_cmd_request(struct f_uas *fu, struct usb_request *req);
static int bot_enqueue_cmd_cbw(struct f_uas *fu)
{
int ret;
+	struct usbg_cdb *cmd;
if (fu->flags & USBG_BOT_CMD_PEND)
return 0;
- ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+	cmd = acquire_cmd_request(fu);
+	if (!cmd)
+		return -ENOMEM;
+
+	ret = usb_ep_queue(fu->ep_out, cmd->req, GFP_ATOMIC);
if (!ret)
fu->flags |= USBG_BOT_CMD_PEND;
return ret;
@@ -61,6 +64,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;
+ release_cmd_request(fu, req);
transport_generic_free_cmd(&cmd->se_cmd, 0);
if (req->status < 0) {
pr_err("ERR %s(%d)\n", __func__, __LINE__);
@@ -136,7 +140,7 @@ static void bot_send_bad_status(struct usbg_cmd *cmd)
}
req->complete = bot_err_compl;
req->context = cmd;
- req->buf = fu->cmd.buf;
+ req->buf = fu->cmd[0]->buf;
usb_ep_queue(ep, req, GFP_KERNEL);
} else {
bot_enqueue_sense_code(fu, cmd);
@@ -245,7 +249,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
init_completion(&cmd->write_complete);
@@ -256,22 +259,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
@@ -297,11 +284,84 @@ static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
if (req->status < 0)
return;
+ release_cmd_request(fu, req);
ret = bot_submit_command(fu, req->buf, req->actual);
if (ret)
pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
}
+static struct usbg_cdb *acquire_cmd_request(struct f_uas *fu)
+{
+ int i;
+
+ for (i = 0; i < fu->ncmd; i++) {
+ if (!fu->cmd[i]->claimed) {
+ fu->cmd[i]->claimed = true;
+ return fu->cmd[i];
+ }
+ }
+ return NULL;
+}
+
+static void release_cmd_request(struct f_uas *fu, struct usb_request *req)
+{
+ int i;
+
+ for (i = 0; i < fu->ncmd; i++) {
+ if (fu->cmd[i]->req == req)
+ fu->cmd[i]->claimed = false;
+ }
+}
+
+static void free_cmd_resource(struct f_uas *fu, struct usb_ep *ep)
+{
+ int i;
+
+ for (i = 0; i < fu->ncmd; i++) {
+ if (fu->cmd[i]->req)
+ usb_ep_free_request(ep, fu->cmd[i]->req);
+
+ kfree(fu->cmd[i]->buf);
+ fu->cmd[i]->buf = NULL;
+
+ kfree(fu->cmd[i]);
+ fu->cmd[i] = NULL;
+ }
+}
+
+static int alloc_cmd_resource(struct f_uas *fu, int num, struct usb_ep *ep,
+ void (*complete)(struct usb_ep *ep,
+ struct usb_request *req))
+{
+ int i;
+
+ fu->ncmd = num;
+ for (i = 0; i < fu->ncmd; i++) {
+		fu->cmd[i] = kzalloc(sizeof(struct usbg_cdb), GFP_KERNEL);
+		if (!fu->cmd[i])
+			goto err_cmd;
+
+		fu->cmd[i]->req = usb_ep_alloc_request(ep, GFP_KERNEL);
+		if (!fu->cmd[i]->req)
+			goto err_cmd;
+
+		fu->cmd[i]->buf = kmalloc(ep->maxpacket, GFP_KERNEL);
+		if (!fu->cmd[i]->buf)
+			goto err_cmd;
+
+		fu->cmd[i]->req->complete = complete;
+		fu->cmd[i]->req->buf = fu->cmd[i]->buf;
+		fu->cmd[i]->req->length = ep->maxpacket;
+		fu->cmd[i]->req->context = fu;
+ }
+
+ return 0;
+err_cmd:
+ free_cmd_resource(fu, ep);
+ return -ENOMEM;
+}
+
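Taken together, acquire_cmd_request(), release_cmd_request() and the alloc/free pair above implement a small fixed-size pool of command requests keyed by the claimed flag. A sketch of the intended lifecycle, using only names from the code above (error handling elided):

	/* Claim a free slot and queue its request ... */
	struct usbg_cdb *cdb = acquire_cmd_request(fu);

	if (cdb)
		usb_ep_queue(fu->ep_cmd, cdb->req, GFP_ATOMIC);

	/* ... and in the request's completion handler: */
	release_cmd_request(fu, req);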
static int bot_prepare_reqs(struct f_uas *fu)
{
int ret;
@@ -314,10 +374,6 @@ static int bot_prepare_reqs(struct f_uas *fu)
if (!fu->bot_req_out)
goto err_out;
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err_cmd;
-
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!fu->bot_status.req)
goto err_sts;
@@ -327,28 +383,20 @@ static int bot_prepare_reqs(struct f_uas *fu)
fu->bot_status.req->complete = bot_status_complete;
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = bot_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_out->maxpacket;
- fu->cmd.req->context = fu;
+ ret = alloc_cmd_resource(fu, BOT_MAX_COMMANDS, fu->ep_out,
+ bot_cmd_complete);
+ if (ret)
+ goto err_cmd;
ret = bot_enqueue_cmd_cbw(fu);
if (ret)
goto err_queue;
return 0;
err_queue:
- kfree(fu->cmd.buf);
- fu->cmd.buf = NULL;
-err_buf:
+ free_cmd_resource(fu, fu->ep_out);
+err_cmd:
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
err_sts:
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- fu->cmd.req = NULL;
-err_cmd:
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
fu->bot_req_out = NULL;
err_out:
@@ -372,16 +420,13 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
- kfree(fu->cmd.buf);
+ free_cmd_resource(fu, fu->ep_out);
fu->bot_req_in = NULL;
fu->bot_req_out = NULL;
- fu->cmd.req = NULL;
fu->bot_status.req = NULL;
- fu->cmd.buf = NULL;
}
static void bot_set_alt(struct f_uas *fu)
@@ -480,14 +525,6 @@ static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
stream->req_status = NULL;
}
-static void uasp_free_cmdreq(struct f_uas *fu)
-{
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
- kfree(fu->cmd.buf);
- fu->cmd.req = NULL;
- fu->cmd.buf = NULL;
-}
-
static void uasp_cleanup_old_alt(struct f_uas *fu)
{
int i;
@@ -502,7 +539,7 @@ static void uasp_cleanup_old_alt(struct f_uas *fu)
for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
uasp_cleanup_one_stream(fu, &fu->stream[i]);
- uasp_free_cmdreq(fu);
+ free_cmd_resource(fu, fu->ep_cmd);
}
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
@@ -565,6 +602,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct uas_stream *stream = cmd->stream;
struct f_uas *fu = cmd->fu;
+ struct usbg_cdb *cmd_cdb;
int ret;
if (req->status < 0)
@@ -599,7 +637,8 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
case UASP_QUEUE_COMMAND:
transport_generic_free_cmd(&cmd->se_cmd, 0);
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+		cmd_cdb = acquire_cmd_request(fu);
+		if (cmd_cdb)
+			usb_ep_queue(fu->ep_cmd, cmd_cdb->req, GFP_ATOMIC);
break;
default:
@@ -719,11 +758,13 @@ static int usbg_submit_command(struct f_uas *, void *, unsigned int);
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
+ struct usbg_cdb *cmd;
int ret;
if (req->status < 0)
return;
+ release_cmd_request(fu, req);
ret = usbg_submit_command(fu, req->buf, req->actual);
/*
* Once we tune for performance enqueue the command req here again so
@@ -733,7 +774,8 @@ static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
*/
if (!ret)
return;
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+	cmd = acquire_cmd_request(fu);
+	if (cmd)
+		usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
}
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
@@ -761,28 +803,6 @@ out:
return -ENOMEM;
}
-static int uasp_alloc_cmd(struct f_uas *fu)
-{
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err;
-
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = uasp_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
- fu->cmd.req->context = fu;
- return 0;
-
-err_buf:
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
-err:
- return -ENOMEM;
-}
-
static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
{
int i;
@@ -800,12 +820,15 @@ static int uasp_prepare_reqs(struct f_uas *fu)
{
int ret;
int i;
- int max_streams;
+ int max_streams, max_commands;
- if (fu->flags & USBG_USE_STREAMS)
+ if (fu->flags & USBG_USE_STREAMS) {
+ max_commands = UASP_MAX_COMMANDS;
max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
- else
+ } else {
+ max_commands = 1;
max_streams = 1;
+ }
for (i = 0; i < max_streams; i++) {
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
@@ -813,19 +836,25 @@ static int uasp_prepare_reqs(struct f_uas *fu)
goto err_cleanup;
}
- ret = uasp_alloc_cmd(fu);
+ ret = alloc_cmd_resource(fu, max_commands, fu->ep_cmd,
+ uasp_cmd_complete);
if (ret)
goto err_free_stream;
uasp_setup_stream_res(fu, max_streams);
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- if (ret)
- goto err_free_stream;
+	/* queue one request per allocated command slot */
+ for (i = 0; i < fu->ncmd; i++) {
+ struct usbg_cdb *cmd = acquire_cmd_request(fu);
+
+ ret = usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
+ if (ret)
+ goto err_free_stream;
+ }
return 0;
err_free_stream:
- uasp_free_cmdreq(fu);
+ free_cmd_resource(fu, fu->ep_cmd);
err_cleanup:
if (i) {
@@ -838,16 +867,28 @@ err_cleanup:
return ret;
}
+#define SS_BOT_INTERFACE_DESC_NO 5
static void uasp_set_alt(struct f_uas *fu)
{
struct usb_function *f = &fu->function;
struct usb_gadget *gadget = f->config->cdev->gadget;
+ struct usb_descriptor_header **ss_uasp_backup = f->ss_descriptors;
int ret;
fu->flags = USBG_IS_UAS;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed == USB_SPEED_SUPER) {
fu->flags |= USBG_USE_STREAMS;
+		/* If the device connects at SuperSpeed, the companion
+		 * descriptor with streams should be attached. Since BOT and
+		 * UAS share the same endpoints, config_ep_by_speed() returns
+		 * the first match, the companion descriptor without streams.
+		 * This is just a workaround; a proper fix still needs to be
+		 * introduced. Advance the ss_descriptors header by the number
+		 * of descriptors present in BOT mode.
+		 */
+ f->ss_descriptors += SS_BOT_INTERFACE_DESC_NO;
+ }
config_ep_by_speed(gadget, f, fu->ep_in);
ret = usb_ep_enable(fu->ep_in);
@@ -873,6 +914,10 @@ static void uasp_set_alt(struct f_uas *fu)
goto err_wq;
fu->flags |= USBG_ENABLED;
+ /* restore ss_descriptors */
+ if (gadget->speed == USB_SPEED_SUPER)
+ f->ss_descriptors = ss_uasp_backup;
+
pr_info("Using the UAS protocol\n");
return;
err_wq:
@@ -884,6 +929,9 @@ err_cmd:
err_b_out:
usb_ep_disable(fu->ep_in);
err_b_in:
+ /* restore ss_descriptors */
+ if (gadget->speed == USB_SPEED_SUPER)
+ f->ss_descriptors = ss_uasp_backup;
fu->flags = 0;
}
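The workaround above amounts to a save/advance/restore of the SuperSpeed descriptor table around endpoint configuration. In outline (names taken from the code; illustrative only, not an additional change):

	struct usb_descriptor_header **backup = f->ss_descriptors;

	f->ss_descriptors += SS_BOT_INTERFACE_DESC_NO;	/* skip the BOT entries */
	/* ... config_ep_by_speed() and usb_ep_enable() calls ... */
	f->ss_descriptors = backup;			/* restore on all paths */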
@@ -949,6 +997,56 @@ static int get_cmd_dir(const unsigned char *cdb)
return ret;
}
+static void recover_w_length_with_maxpacket(struct usbg_cmd *cmd,
+ struct usb_request *req)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ int rem;
+
+ rem = se_cmd->data_length % fu->ep_out->maxpacket;
+ if (rem) {
+		/* recover the padded data length */
+ cmd->data_len -= fu->ep_out->maxpacket - rem;
+
+ if (gadget->sg_supported) {
+ struct scatterlist *s = sg_last(se_cmd->t_data_sg,
+ se_cmd->t_data_nents);
+
+ s->length -= fu->ep_out->maxpacket - rem;
+ }
+ }
+}
+
+static void adjust_w_length_with_maxpacket(struct usbg_cmd *cmd,
+ struct usb_request *req)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct f_uas *fu = cmd->fu;
+ struct usb_gadget *gadget = fuas_to_gadget(fu);
+ int rem;
+
+ cmd->data_len = se_cmd->data_length;
+ rem = cmd->data_len % fu->ep_out->maxpacket;
+ if (rem) {
+		/* pad the data length so that the transfer size is a
+		 * multiple of the max packet size
+		 */
+ cmd->data_len += fu->ep_out->maxpacket - rem;
+
+ if (gadget->sg_supported) {
+			/* if sg is supported, the data length in the last
+			 * sg entry also needs to be padded to a multiple
+			 * of the max packet size.
+			 */
+ struct scatterlist *s = sg_last(se_cmd->t_data_sg,
+ se_cmd->t_data_nents);
+
+ s->length += fu->ep_out->maxpacket - rem;
+ }
+ }
+}
+
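The helper pair above rounds the write length up to a multiple of the endpoint's max packet size before starting the transfer and undoes the rounding on completion. The arithmetic is a plain round-up; a standalone sketch (pad_to_maxpacket is a hypothetical name, not part of this patch):

	/* E.g. with a 1024-byte bulk maxpacket, 3000 bytes pads to 3072. */
	static inline u32 pad_to_maxpacket(u32 len, u32 maxpacket)
	{
		u32 rem = len % maxpacket;

		return rem ? len + (maxpacket - rem) : len;
	}

Since bulk maxpacket sizes are powers of two (512 at high speed, 1024 at SuperSpeed), this is equivalent to round_up(len, maxpacket).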
static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
@@ -959,6 +1057,8 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
goto cleanup;
}
+ recover_w_length_with_maxpacket(cmd, req);
+
if (req->num_sgs == 0) {
sg_copy_from_buffer(se_cmd->t_data_sg,
se_cmd->t_data_nents,
@@ -979,8 +1079,10 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
+ adjust_w_length_with_maxpacket(cmd, req);
+
if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+ cmd->data_buf = kmalloc(cmd->data_len, GFP_ATOMIC);
if (!cmd->data_buf)
return -ENOMEM;
@@ -992,7 +1094,7 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
}
req->complete = usbg_data_write_cmpl;
- req->length = se_cmd->data_length;
+ req->length = cmd->data_len;
req->context = cmd;
return 0;
}
@@ -1185,7 +1287,8 @@ static void bot_cmd_work(struct work_struct *work)
if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0) < 0)
+ cmd->data_len, cmd->prio_attr, dir,
+ TARGET_SCF_ACK_KREF) < 0)
goto out;
return;
@@ -1674,9 +1777,11 @@ static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
CONFIGFS_ATTR(tcm_usbg_tpg_, enable);
CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
+static struct configfs_attribute tcm_usbg_tpg_attr_maxburst;
static struct configfs_attribute *usbg_base_attrs[] = {
&tcm_usbg_tpg_attr_enable,
&tcm_usbg_tpg_attr_nexus,
+ &tcm_usbg_tpg_attr_maxburst,
NULL,
};
@@ -1984,6 +2089,32 @@ static struct usb_gadget_strings *tcm_strings[] = {
NULL,
};
+static ssize_t tcm_usbg_tpg_maxburst_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", uasp_cmd_comp_desc.bMaxBurst);
+}
+
+static ssize_t tcm_usbg_tpg_maxburst_store(struct config_item *item,
+ const char *page, size_t count)
+{
+	unsigned int value;
+ int ret;
+
+	ret = kstrtouint(page, 10, &value);
+	if (ret)
+		return ret;
+
+	/* bMaxBurst is a 4-bit field: at most 15 extra packets per burst */
+	if (value > 15)
+		return -EINVAL;
+
+ uasp_bi_ep_comp_desc.bMaxBurst = value;
+ uasp_bo_ep_comp_desc.bMaxBurst = value;
+ uasp_status_in_ep_comp_desc.bMaxBurst = value;
+ uasp_cmd_comp_desc.bMaxBurst = value;
+ bot_bi_ep_comp_desc.bMaxBurst = value;
+ bot_bo_ep_comp_desc.bMaxBurst = value;
+
+ return count;
+}
+CONFIGFS_ATTR(tcm_usbg_tpg_, maxburst);
+
static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
@@ -2112,6 +2243,13 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
return -EOPNOTSUPP;
}
+static int tcm_get_alt(struct usb_function *f, unsigned int intf)
+{
+ struct f_uas *fu = to_f_uas(f);
+
+ return fu->flags & USBG_IS_UAS ? 1 : 0;
+}
+
static void tcm_disable(struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
@@ -2300,6 +2438,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
+ fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index 3cd565794ad7..54ed3bca8add 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -98,6 +98,7 @@ struct uas_stream {
struct usbg_cdb {
struct usb_request *req;
void *buf;
+ bool claimed;
};
struct bot_status {
@@ -105,6 +106,9 @@ struct bot_status {
struct bulk_cs_wrap csw;
};
+#define UASP_MAX_COMMANDS 6
+#define BOT_MAX_COMMANDS 1
+#define MAX_COMMANDS UASP_MAX_COMMANDS
struct f_uas {
struct usbg_tpg *tpg;
struct usb_function function;
@@ -117,7 +121,8 @@ struct f_uas {
#define USBG_IS_BOT (1 << 3)
#define USBG_BOT_CMD_PEND (1 << 4)
- struct usbg_cdb cmd;
+ u32 ncmd;
+ struct usbg_cdb *cmd[MAX_COMMANDS];
struct usb_ep *ep_in;
struct usb_ep *ep_out;
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 5c042f380708..de64182964a9 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -207,8 +207,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
video->encode(req, video, buf);
- ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ ret = uvcg_video_ep_queue(video, req);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
@@ -332,9 +332,10 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
/* Queue the USB request */
ret = uvcg_video_ep_queue(video, req);
- spin_unlock_irqrestore(&queue->irqlock, flags);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index b1f4104d1283..d492c51e811a 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -11,6 +11,7 @@
* USB peripheral controller (at91_udc.c).
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -171,6 +172,7 @@ struct xusb_ep {
* @addr: the usb device base address
* @lock: instance of spinlock
* @dma_enabled: flag indicating whether the dma is included in the system
+ * @clk: pointer to struct clk
* @read_fn: function pointer to read device registers
* @write_fn: function pointer to write to device registers
*/
@@ -188,8 +190,9 @@ struct xusb_udc {
void __iomem *addr;
spinlock_t lock;
bool dma_enabled;
+ struct clk *clk;
- unsigned int (*read_fn)(void __iomem *);
+ unsigned int (*read_fn)(void __iomem *reg);
void (*write_fn)(void __iomem *, u32, u32);
};
@@ -1399,7 +1402,6 @@ err:
/**
* xudc_stop - stops the device.
* @gadget: pointer to the usb gadget structure
- * @driver: pointer to usb gadget driver structure
*
* Return: zero always
*/
@@ -1732,7 +1734,7 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
*
* Process setup packet and delegate to gadget layer.
*/
-static void xudc_handle_setup(struct xusb_udc *udc)
+static void xudc_handle_setup(struct xusb_udc *udc) __must_hold(&udc->lock)
{
struct xusb_ep *ep0 = &udc->ep[0];
struct usb_ctrlrequest setup;
@@ -2094,6 +2096,26 @@ static int xudc_probe(struct platform_device *pdev)
udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb;
udc->gadget.name = driver_name;
+ udc->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(udc->clk)) {
+ if (PTR_ERR(udc->clk) != -ENOENT) {
+ ret = PTR_ERR(udc->clk);
+ goto fail;
+ }
+
+ /*
+		 * Clock framework support is optional; carry on anyway
+		 * if we do not find a matching clock.
+ */
+ udc->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(udc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
spin_lock_init(&udc->lock);
/* Check for IP endianness */
@@ -2149,10 +2171,62 @@ static int xudc_remove(struct platform_device *pdev)
struct xusb_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
+ clk_disable_unprepare(udc->clk);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int xudc_suspend(struct device *dev)
+{
+ struct xusb_udc *udc;
+	u32 ctrlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+	ctrlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+	ctrlreg &= ~XUSB_CONTROL_USB_READY_MASK;
+
+	udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (udc->driver && udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+
+ clk_disable(udc->clk);
+
+ return 0;
+}
+
+static int xudc_resume(struct device *dev)
+{
+ struct xusb_udc *udc;
+	u32 ctrlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ clk_enable(udc->clk);
+ spin_lock_irqsave(&udc->lock, flags);
+
+	ctrlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+	ctrlreg |= XUSB_CONTROL_USB_READY_MASK;
+
+	udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
+};
+
/* Match table for of_platform binding */
static const struct of_device_id usb_of_match[] = {
{ .compatible = "xlnx,usb2-device-4.00.a", },
@@ -2164,6 +2238,7 @@ static struct platform_driver xudc_driver = {
.driver = {
.name = driver_name,
.of_match_table = usb_of_match,
+ .pm = &xudc_pm_ops,
},
.probe = xudc_probe,
.remove = xudc_remove,
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index d2a27578e440..e2d0f7809c83 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -32,6 +32,8 @@
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
+ *
+ * Return: Always returns 0
*/
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
@@ -46,11 +48,9 @@ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
dev_warn(hcd->self.controller,
"Maybe your device is not a high speed device?\n");
dev_warn(hcd->self.controller,
- "The USB host controller does not support full speed "
- "nor low speed devices\n");
+ "USB host controller doesn't support FS/LS devices\n");
dev_warn(hcd->self.controller,
- "You can reconfigure the host controller to have "
- "full speed support\n");
+ "You can reconfigure host controller to support FS\n");
}
return 0;
@@ -112,6 +112,8 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
+ *
+ * Return: zero on success, 'rv' value on failure
*/
static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
@@ -196,6 +198,8 @@ err_irq:
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
+ *
+ * Return: Always returns 0
*/
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 4ba0576bafa1..5bfb069cd33b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1823,6 +1823,13 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
}
+	/* After resuming from suspend, the controller may not initiate
+	 * LFPS U3-exit signalling if it is not given a delay after the
+	 * link is updated from U3 to U0. So wait for at least 1 ms.
+	 */
+ if (next_state == XDEV_U0)
+ mdelay(1);
+
/* poll for U0 link state complete, both USB2 and USB3 */
for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 4e168de8944d..456f5d0667fe 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -685,6 +685,16 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
+ if (xhci->quirks & XHCI_STREAM_QUIRK) {
+			/* The dwc3 host controller has an issue where it does
+			 * not process BULK IN stream rings even after ringing
+			 * the doorbell, so set up a timer to avoid a hang
+			 * condition.
+			 */
+ timer_setup(&cur_ring->stream_timer,
+ xhci_stream_timeout, 0);
+ cur_ring->xhci = xhci;
+ }
+
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
@@ -771,6 +781,10 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
+
+ if (xhci->quirks & XHCI_STREAM_QUIRK)
+ del_timer_sync(&cur_ring->stream_timer);
+
if (cur_ring) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 19c5eee20eb4..3f8b8e3146e7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -19,6 +19,8 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/xhci_pdriver.h>
#include "xhci.h"
#include "xhci-plat.h"
@@ -157,6 +159,35 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+static int usb_otg_set_host(struct device *dev, struct usb_hcd *hcd, bool yes)
+{
+ int ret = 0;
+
+ hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (!IS_ERR_OR_NULL(hcd->usb_phy) && hcd->usb_phy->otg) {
+ if (yes) {
+ if (otg_set_host(hcd->usb_phy->otg, &hcd->self)) {
+ usb_put_phy(hcd->usb_phy);
+ goto disable_phy;
+ }
+ } else {
+ ret = otg_set_host(hcd->usb_phy->otg, NULL);
+ usb_put_phy(hcd->usb_phy);
+ goto disable_phy;
+ }
+
+ } else {
+ goto disable_phy;
+ }
+
+ return 0;
+
+disable_phy:
+ hcd->usb_phy = NULL;
+
+ return ret;
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
@@ -212,6 +243,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
return ret;
}
+ /* Set the controller as wakeup capable */
+ device_set_wakeup_capable(&pdev->dev, true);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
@@ -284,6 +318,12 @@ static int xhci_plat_probe(struct platform_device *pdev)
/* Iterate over all parent nodes for finding quirks */
for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
+ if (device_property_read_bool(&pdev->dev, "xhci-stream-quirk"))
+ xhci->quirks |= XHCI_STREAM_QUIRK;
+
+ if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
@@ -323,6 +363,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto dealloc_usb2_hcd;
+ ret = usb_otg_set_host(&pdev->dev, hcd, true);
+ if (ret)
+ goto dealloc_usb2_hcd;
+
device_enable_async_suspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -375,6 +419,8 @@ static int xhci_plat_remove(struct platform_device *dev)
xhci->shared_hcd = NULL;
usb_phy_shutdown(hcd->usb_phy);
+ usb_otg_set_host(&dev->dev, hcd, false);
+
usb_remove_hcd(hcd);
usb_put_hcd(shared_hcd);
@@ -394,6 +440,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+#if IS_ENABLED(CONFIG_USB_DWC3_OF_SIMPLE)
+ /* Inform dwc3 driver about the device wakeup capability */
+ if (device_may_wakeup(&hcd->self.root_hub->dev)) {
+ enable_irq_wake(hcd->irq);
+ dwc3_host_wakeup_capable(dev, true);
+ } else {
+ dwc3_host_wakeup_capable(dev, false);
+ }
+#endif
+
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend. Since xhci_plat_suspend is currently
@@ -431,6 +487,9 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (device_may_wakeup(&hcd->self.root_hub->dev))
+ disable_irq_wake(hcd->irq);
+
return xhci_resume(xhci, 0);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5a93a225c97e..fb4581dc43e2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -829,9 +829,21 @@ remove_finished_td:
*/
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
+
inc_td_cnt(cur_td->urb);
- if (last_td_in_urb(cur_td))
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+
+ if (last_td_in_urb(cur_td)) {
+		if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
+		    ep_ring->stream_timeout_handler) {
+			/* We get here if the stream timer timed out and a
+			 * stop command was issued. Give the urb back with
+			 * status -EAGAIN so that it can be re-submitted.
+			 */
+			xhci_giveback_urb_in_irq(xhci, cur_td, -EAGAIN);
+			ep_ring->stream_timeout_handler = false;
+		} else {
+			xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+		}
+	}
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -940,6 +952,84 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
+/* This function is called when the stream ring timer times out.
+ * The dwc3 host controller has an issue where, once in a while, it does not
+ * process the BULK IN stream ring TDs even after the doorbell has been rung
+ * for that stream ring. Because of this behaviour no transfer events are
+ * generated by the controller on the stream ring, resulting in a hang.
+ * xhci_stream_timeout() works around this by issuing a stop endpoint
+ * command on the stream ring once the stream timer times out.
+ */
+void xhci_stream_timeout(struct timer_list *arg)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_ep *ep;
+ struct xhci_ring *ep_ring;
+ unsigned int slot_id, ep_index, stream_id;
+ struct xhci_td *td = NULL;
+ struct urb *urb = NULL;
+ struct urb_priv *urb_priv;
+ struct xhci_command *command;
+ unsigned long flags;
+ int i;
+
+ ep_ring = from_timer(ep_ring, arg, stream_timer);
+ xhci = ep_ring->xhci;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+
+ if (!list_empty(&ep_ring->td_list)) {
+ td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ urb = td->urb;
+ urb_priv = urb->hcpriv;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ stream_id = ep_ring->stream_id;
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep_ring->stream_timeout_handler = true;
+
+ /* Delete the stream ring timer */
+ del_timer(&ep_ring->stream_timer);
+
+ for (i = 0; i < urb_priv->num_tds; i++) {
+ td = &urb_priv->td[i];
+ list_add_tail(&td->cancelled_td_list,
+ &ep->cancelled_td_list);
+ }
+
+ /* Queue a stop endpoint command, but only if this is
+ * the first cancellation to be handled.
+ */
+ if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+ command = xhci_alloc_command(xhci, false,
+ GFP_ATOMIC);
+ if (!command) {
+ xhci_warn(xhci,
+ "%s: Failed to allocate command\n",
+ __func__);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
+
+ ep->ep_state |= EP_STOP_CMD_PENDING;
+ ep->stop_cmd_timer.expires = jiffies +
+ XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+ add_timer(&ep->stop_cmd_timer);
+ xhci_queue_stop_endpoint(xhci, command,
+ urb->dev->slot_id, ep_index, 0);
+ xhci_ring_cmd_db(xhci);
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ /* let the SCSI stack take care */
+ del_timer(&ep_ring->stream_timer);
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -2364,6 +2454,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num++;
}
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
+ (ep->ep_state & EP_HAS_STREAMS))
+ del_timer(&ep_ring->stream_timer);
+
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
@@ -3411,6 +3505,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
check_trb_math(urb, enqd_len);
+
+	if ((xhci->quirks & XHCI_STREAM_QUIRK) && urb->stream_id > 0 &&
+	    usb_endpoint_dir_in(&urb->ep->desc)) {
+ /* Start the stream timer so that xhci_stream_timeout() can be
+ * triggered if xhci is stuck while processing BULK IN streams.
+ */
+ ring->stream_timeout_handler = false;
+ mod_timer(&ring->stream_timer, jiffies + 5 * HZ);
+ }
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1f7d35b3a937..bf7da58b5751 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -183,7 +183,11 @@ int xhci_reset(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = readl(&xhci->op_regs->command);
+#ifdef CONFIG_USB_DWC3_OTG
+ command |= CMD_LRESET;
+#else
command |= CMD_RESET;
+#endif
writel(command, &xhci->op_regs->command);
/* Existing Intel xHCI controllers require a delay of 1 mS,
@@ -197,7 +201,12 @@ int xhci_reset(struct xhci_hcd *xhci)
udelay(1000);
ret = xhci_handshake(&xhci->op_regs->command,
- CMD_RESET, 0, 10 * 1000 * 1000);
+#ifdef CONFIG_USB_DWC3_OTG
+ CMD_LRESET,
+#else
+ CMD_RESET,
+#endif
+ 0, 10 * 1000 * 1000);
if (ret)
return ret;
@@ -718,6 +727,12 @@ static void xhci_stop(struct usb_hcd *hcd)
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
+ /* Remove shared_hcd if no otg ports are present */
+ if (!hcd->self.otg_port) {
+ /* usb core will free this hcd shortly, unset pointer */
+ xhci->shared_hcd = NULL;
+ }
+
mutex_unlock(&xhci->mutex);
return;
}
@@ -1670,8 +1685,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto err_giveback;
}
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Delete the stream timer */
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0))
+ del_timer(&ep_ring->stream_timer);
+
i = urb_priv->num_tds_done;
if (i < urb_priv->num_tds)
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel URB %p, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b3afc7b76662..fbed91fa590b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1612,6 +1612,9 @@ struct xhci_ring {
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
+ struct timer_list stream_timer;
+ bool stream_timeout_handler;
+ struct xhci_hcd *xhci;
};
struct xhci_erst_entry {
@@ -1871,6 +1874,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+#define XHCI_STREAM_QUIRK	BIT_ULL(36)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -2118,6 +2122,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_td *td);
void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
+void xhci_stream_timeout(struct timer_list *arg);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 24b4f091acb8..6b9114b2a6e6 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -173,6 +173,7 @@ config USB_TEGRA_PHY
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
depends on ARM || ARM64
+ depends on USB_PHY
select USB_ULPI_VIEWPORT
help
Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index a43c49369a60..0f7f6eb16041 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -13,9 +13,16 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
+#include <linux/usb/phy.h>
struct ulpi_info {
@@ -39,6 +46,13 @@ static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
};
+struct ulpi_phy {
+ struct usb_phy *usb_phy;
+ void __iomem *regs;
+ unsigned int vp_offset;
+ unsigned int flags;
+};
+
static int ulpi_set_otg_flags(struct usb_phy *phy)
{
unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN |
@@ -240,6 +254,23 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
+static int usbphy_set_vbus(struct usb_phy *phy, int on)
+{
+ unsigned int flags = usb_phy_io_read(phy, ULPI_OTG_CTRL);
+
+ flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
+
+ if (on) {
+ if (phy->flags & ULPI_OTG_DRVVBUS)
+ flags |= ULPI_OTG_CTRL_DRVVBUS;
+
+ if (phy->flags & ULPI_OTG_DRVVBUS_EXT)
+ flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
+ }
+
+ return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
+}
+
struct usb_phy *
otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
@@ -262,6 +293,7 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
phy->io_ops = ops;
phy->otg = otg;
phy->init = ulpi_init;
+ phy->set_vbus = usbphy_set_vbus;
otg->usb_phy = phy;
otg->set_host = ulpi_set_host;
@@ -271,3 +303,70 @@ otg_ulpi_create(struct usb_phy_io_ops *ops,
}
EXPORT_SYMBOL_GPL(otg_ulpi_create);
+static int ulpi_phy_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct ulpi_phy *uphy;
+ bool flag;
+ int ret;
+
+ uphy = devm_kzalloc(&pdev->dev, sizeof(*uphy), GFP_KERNEL);
+ if (!uphy)
+ return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	uphy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!uphy->regs)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(np, "view-port", &uphy->vp_offset);
+	if (ret) {
+		dev_err(&pdev->dev, "view-port register not specified\n");
+		return ret;
+	}
+
+ flag = of_property_read_bool(np, "drv-vbus");
+ if (flag)
+ uphy->flags |= ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT;
+
+	uphy->usb_phy = otg_ulpi_create(&ulpi_viewport_access_ops, uphy->flags);
+	if (!uphy->usb_phy)
+		return -ENOMEM;
+
+	uphy->usb_phy->dev = &pdev->dev;
+	uphy->usb_phy->io_priv = uphy->regs + uphy->vp_offset;
+
+	platform_set_drvdata(pdev, uphy);
+
+	return usb_add_phy_dev(uphy->usb_phy);
+}
+
+static int ulpi_phy_remove(struct platform_device *pdev)
+{
+ struct ulpi_phy *uphy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(uphy->usb_phy);
+
+ return 0;
+}
+
+static const struct of_device_id ulpi_phy_table[] = {
+ { .compatible = "ulpi-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ulpi_phy_table);
+
+static struct platform_driver ulpi_phy_driver = {
+ .probe = ulpi_phy_probe,
+ .remove = ulpi_phy_remove,
+ .driver = {
+ .name = "ulpi-phy",
+ .of_match_table = ulpi_phy_table,
+ },
+};
+module_platform_driver(ulpi_phy_driver);
+
+MODULE_DESCRIPTION("ULPI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index de4d33fbf6b8..6bf116da11ed 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -80,6 +80,8 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
+static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
+ struct scsi_cmnd *cmnd);
/*
* This driver needs its own workqueue, as we need to control memory allocation.
@@ -296,18 +298,283 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *
return response_code == RC_TMF_SUCCEEDED;
}
+static void dummy_scsi_done(struct scsi_cmnd *cmnd)
+{
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+
+ devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
+ kfree(cmnd->request);
+ kfree(cmnd);
+}
+
+static void uas_workaround_cmplt(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd;
+ struct uas_cmd_info *cmdinfo;
+
+ if ((urb->context != NULL) && (urb->status == 0)) {
+ cmnd = urb->context;
+ cmdinfo = (struct uas_cmd_info *)&cmnd->SCp;
+
+ if (cmdinfo->data_in_urb != urb)
+ cmnd->scsi_done(cmnd);
+ }
+
+ usb_free_urb(urb);
+}
+
+static struct urb *uas_workaround_cmnd(struct uas_dev_info *devinfo, gfp_t gfp,
+				       struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct urb *urb;
+ int err;
+
+ urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate cmnd URB\n", __func__);
+ return NULL;
+ }
+
+ err = usb_submit_urb(urb, gfp);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit cmd, err=%d\n", __func__, err);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->cmd_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+
+}
+
+static struct urb *uas_workaround_data(struct uas_dev_info *devinfo, gfp_t gfp,
+				       struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct usb_device *udev = devinfo->udev;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+ struct scsi_data_buffer *sdb = NULL;
+ void *temp_buf;
+ unsigned int pipe;
+ int err;
+
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate URB\n", __func__);
+ return NULL;
+ }
+
+ cmdinfo->data_in_urb = urb;
+ sdb = &cmnd->sdb;
+ pipe = devinfo->data_in_pipe;
+	temp_buf = kzalloc(sdb->length, gfp);
+ if (!temp_buf) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory\n", __func__);
+ goto free;
+ }
+
+ usb_fill_bulk_urb(urb, udev, pipe, temp_buf, sdb->length,
+ uas_workaround_cmplt, cmnd);
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+	err = usb_submit_urb(urb, gfp);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit Data In urb, err = %d\n",
+ __func__, err);
+ goto free;
+ }
+
+ usb_anchor_urb(urb, &devinfo->data_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+static struct urb *uas_workaround_sense(struct uas_dev_info *devinfo, gfp_t gfp,
+					struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct usb_device *udev = devinfo->udev;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+ struct sense_iu *iu;
+ int err;
+
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate URB\n", __func__);
+ return NULL;
+ }
+
+ iu = kzalloc(sizeof(*iu), gfp);
+ if (!iu) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for sense_iu\n",
+ __func__);
+ goto free;
+ }
+
+ usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
+ uas_workaround_cmplt, cmnd);
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+	err = usb_submit_urb(urb, gfp);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit Sense urb, err = %d\n",
+ __func__, err);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->sense_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+/*
+ * This function is called only when the DATA IN stream timer has expired,
+ * which means the xhci host controller failed to process the TRBs present
+ * in the stream ring. As part of the recovery sequence, this function
+ * re-submits the previously stopped urb on which xhci failed to process
+ * data, and alongside that urb it prepares and submits sense, data and
+ * cmnd urbs carrying a standard inquiry request on the next free stream
+ * id tag. Doing so makes xhci start processing the previously stopped urb
+ * together with the urb that carries the standard inquiry scsi command.
+ */
+static int uas_workaround(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd = urb->context;
+ struct scsi_device *sdev = cmnd->device;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+ struct scsi_cmnd *temp_cmnd;
+ struct uas_cmd_info *temp_cmdinfo;
+ struct urb *sense_urb, *data_urb, *cmnd_urb;
+ struct request *temp_request;
+ unsigned int idx;
+ int err;
+ char inquiry[16] = { 0x12, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
+
+ /* Find a free uas-tag */
+ for (idx = 0; idx < devinfo->qdepth; idx++) {
+ if (!devinfo->cmnd[idx])
+ break;
+ }
+
+ if (idx == devinfo->qdepth) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to find free tag\n", __func__);
+ err = -EINVAL;
+ goto free;
+ }
+
+ /* Create a scsi_cmnd and send dummy inquiry data on the next
+ * available tag
+ */
+ temp_cmnd = kzalloc(sizeof(struct scsi_cmnd), GFP_ATOMIC);
+ if (!temp_cmnd) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for scsi_cmnd\n",
+ __func__);
+ err = -ENOMEM;
+ goto free;
+ }
+
+ temp_request = kzalloc(sizeof(struct request), GFP_ATOMIC);
+	if (!temp_request) {
+		shost_printk(KERN_INFO, sdev->host,
+			"%s: Failed to allocate memory for request\n",
+			__func__);
+		kfree(temp_cmnd);
+		err = -ENOMEM;
+		goto free;
+ }
+
+ temp_cmnd->device = cmnd->device;
+ temp_cmnd->cmnd = inquiry;
+ temp_cmnd->cmd_len = 16;
+ temp_cmnd->sdb.length = 0x10;
+ temp_cmnd->scsi_done = dummy_scsi_done;
+ temp_request->tag = idx;
+ temp_cmnd->request = temp_request;
+
+ temp_cmdinfo = (struct uas_cmd_info *)&temp_cmnd->SCp;
+ memset(temp_cmdinfo, 0, sizeof(struct uas_cmd_info));
+
+ temp_cmdinfo->uas_tag = idx + 1;
+ devinfo->cmnd[idx] = temp_cmnd;
+
+ /* Submit previously stopped URB first */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: submit err %d\n", __func__, err);
+		devinfo->cmnd[idx] = NULL;
+		kfree(temp_cmnd);
+		kfree(temp_request);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->data_urbs);
+
+ /* Allocate and submit SENSE urb for next available tag */
+ sense_urb = uas_workaround_sense(devinfo, GFP_ATOMIC, temp_cmnd);
+	if (!sense_urb) {
+		devinfo->cmnd[idx] = NULL;
+		kfree(temp_request);
+		kfree(temp_cmnd);
+		err = -ENOMEM;
+		goto free;
+ }
+
+ /* Allocate and submit DATA IN urb for next available tag */
+ data_urb = uas_workaround_data(devinfo, GFP_ATOMIC, temp_cmnd);
+ if (!data_urb) {
+ /* Kill previously allocated sense urb */
+ sense_urb->context = NULL;
+ usb_kill_urb(sense_urb);
+ usb_put_urb(sense_urb);
+		devinfo->cmnd[idx] = NULL;
+		kfree(temp_request);
+		kfree(temp_cmnd);
+		err = -ENOMEM;
+		goto free;
+ }
+
+ /* Allocate and submit CMND urb with dummy inquiry data */
+ cmnd_urb = uas_workaround_cmnd(devinfo, GFP_ATOMIC, temp_cmnd);
+ if (!cmnd_urb) {
+		/* Kill the previously allocated data and sense urbs */
+		data_urb->context = NULL;
+		usb_kill_urb(data_urb);
+		usb_put_urb(data_urb);
+		sense_urb->context = NULL;
+		usb_kill_urb(sense_urb);
+		usb_put_urb(sense_urb);
+		devinfo->cmnd[idx] = NULL;
+		kfree(temp_request);
+		kfree(temp_cmnd);
+		err = -ENOMEM;
+ }
+
+free:
+ return err;
+}
+
static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
- struct Scsi_Host *shost = urb->context;
- struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)urb->context;
+ struct uas_dev_info *devinfo =
+ (struct uas_dev_info *)cmnd->device->hostdata;
struct urb *data_in_urb = NULL;
struct urb *data_out_urb = NULL;
- struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
unsigned int idx;
int status = urb->status;
+ int err;
bool success;
spin_lock_irqsave(&devinfo->lock, flags);
@@ -316,6 +583,21 @@ static void uas_stat_cmplt(struct urb *urb)
goto out;
if (status) {
+ if (status == -EAGAIN) {
+ /* We get here only if the xhci stream timer expires,
+			/* We get here only if the xhci stream timer expired;
+			 * call uas_workaround() with this urb as the argument.
+ err = uas_workaround(urb);
+ if (err != 0) {
+ dev_err(&urb->dev->dev,
+ "%s: uas_workaround() failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return;
+ }
+
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
goto out;
@@ -398,10 +680,27 @@ static void uas_data_cmplt(struct urb *urb)
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned long flags;
+ int err;
int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
+ if ((status == -EAGAIN) && (!devinfo->resetting) &&
+ (cmdinfo->data_in_urb == urb)) {
+		/* We get here only if the xhci stream timer expired;
+		 * call uas_workaround() with this urb as the argument.
+ */
+ err = uas_workaround(urb);
+ if (err != 0) {
+ dev_err(&urb->dev->dev,
+ "%s: uas_workaround() failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return;
+ }
+
if (cmdinfo->data_in_urb == urb) {
cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
cmdinfo->data_in_urb = NULL;
@@ -480,7 +779,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto free;
usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
- uas_stat_cmplt, cmnd->device->host);
+ uas_stat_cmplt, cmnd);
if (devinfo->use_streams)
urb->stream_id = cmdinfo->uas_tag;
urb->transfer_flags |= URB_FREE_BUFFER;
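The change above reroutes the stat URB's context from the Scsi_Host to the individual scsi_cmnd, so the completion handler can reach the per-command uas_cmd_info directly when -EAGAIN arrives. A minimal sketch of the resulting completion pattern, with hypothetical names (my_stat_cmplt, my_workaround) standing in for the real handlers:

	#include <linux/usb.h>
	#include <scsi/scsi_cmnd.h>

	static int my_workaround(struct urb *urb);	/* hypothetical, like uas_workaround() */

	/* Sketch: a stat completion that defers stalled stream URBs to a
	 * workaround instead of completing the command with an error. */
	static void my_stat_cmplt(struct urb *urb)
	{
		struct scsi_cmnd *cmnd = urb->context;	/* per-command context */

		if (urb->status == -EAGAIN) {
			/* xHCI stream timer expired: rebuild and resubmit */
			if (my_workaround(urb))
				dev_err(&urb->dev->dev, "workaround failed for tag %d\n",
					((struct uas_cmd_info *)&cmnd->SCp)->uas_tag);
			return;
		}
		/* normal status handling continues here */
	}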
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 37157ed9a881..b080a59113c0 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -45,6 +45,12 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+UNUSUAL_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
+ "Netchip",
+ "Target Product",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index a22f2d431a35..b6a1b43e66c2 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -335,8 +335,10 @@ static int cdns_wdt_probe(struct platform_device *pdev)
wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(dev, "input clock not found\n");
- return PTR_ERR(wdt->clk);
+ ret = PTR_ERR(wdt->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
+ return ret;
}
ret = clk_prepare_enable(wdt->clk);
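The reworked clk_get error path is the standard probe-deferral idiom: -EPROBE_DEFER is an expected, transient result, so the driver stays quiet and lets the driver core retry the probe. On kernels that provide dev_err_probe() (v5.9 and later), the same logic collapses to a single call; a hedged equivalent of the hunk above:

	wdt->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(wdt->clk))
		return dev_err_probe(dev, PTR_ERR(wdt->clk),
				     "input clock not found\n");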
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 03786992b701..26a23f98017e 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -2,7 +2,7 @@
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
- * (C) Copyright 2013 - 2014 Xilinx, Inc.
+ * (C) Copyright 2013 - 2019 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
*/
@@ -18,18 +18,37 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
+#define XWT_WWDT_DEFAULT_TIMEOUT 10
+#define XWT_WWDT_MIN_TIMEOUT 1
+#define XWT_WWDT_MAX_TIMEOUT 80
+
/* Register offsets for the Wdt device */
#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
#define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */
#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
+#define XWT_WWREF_OFFSET 0x1000 /* Refresh Register */
+#define XWT_WWCSR_OFFSET 0x2000 /* Control/Status Register */
+#define XWT_WWOFF_OFFSET 0x2008 /* Offset Register */
+#define XWT_WWCMP0_OFFSET 0x2010 /* Compare Value Register0 */
+#define XWT_WWCMP1_OFFSET 0x2014 /* Compare Value Register1 */
+#define XWT_WWWRST_OFFSET 0x2FD0 /* Warm Reset Register */
/* Control/Status Register Masks */
-#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
-#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
-#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
+#define XWT_CSR0_WRS_MASK BIT(3) /* Reset status */
+#define XWT_CSR0_WDS_MASK BIT(2) /* Timer state */
+#define XWT_CSR0_EWDT1_MASK BIT(1) /* Enable bit 1 */
/* Control/Status Register 0/1 bits */
-#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
+#define XWT_CSRX_EWDT2_MASK BIT(0) /* Enable bit 2 */
+
+/* Refresh Register Masks */
+#define XWT_WWREF_GWRR_MASK BIT(0) /* Refresh and start new period */
+
+/* Generic Control/Status Register Masks */
+#define XWT_WWCSR_GWEN_MASK BIT(0) /* Enable Bit */
+
+/* Warm Reset Register Masks */
+#define XWT_WWRST_GWWRR_MASK BIT(0) /* Warm Reset Register */
/* SelfTest constants */
#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
@@ -37,10 +56,34 @@
#define WATCHDOG_NAME "Xilinx Watchdog"
+static int wdt_timeout;
+
+module_param(wdt_timeout, int, 0644);
+MODULE_PARM_DESC(wdt_timeout,
+ "Watchdog time in seconds. (default="
+ __MODULE_STRING(XWT_WWDT_DEFAULT_TIMEOUT) ")");
+
+/**
+ * enum xwdt_ip_type - WDT IP type.
+ *
+ * @XWDT_WDT: Soft wdt ip.
+ * @XWDT_WWDT: Window wdt ip.
+ */
+enum xwdt_ip_type {
+ XWDT_WDT = 0,
+ XWDT_WWDT,
+};
+
+struct xwdt_devtype_data {
+ enum xwdt_ip_type wdttype;
+ const struct watchdog_ops *xwdt_ops;
+ const struct watchdog_info *xwdt_info;
+};
+
struct xwdt_device {
void __iomem *base;
u32 wdt_interval;
- spinlock_t spinlock;
+ spinlock_t spinlock; /* spinlock for register handling */
struct watchdog_device xilinx_wdt_wdd;
struct clk *clk;
};
@@ -50,6 +93,7 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
int ret;
u32 control_status_reg;
struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
ret = clk_enable(xdev->clk);
if (ret) {
@@ -70,6 +114,8 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
spin_unlock(&xdev->spinlock);
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Started!\n");
+
return 0;
}
@@ -77,6 +123,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
{
u32 control_status_reg;
struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
spin_lock(&xdev->spinlock);
@@ -91,7 +138,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
clk_disable(xdev->clk);
- pr_info("Stopped!\n");
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Stopped!\n");
return 0;
}
@@ -126,6 +173,126 @@ static const struct watchdog_ops xilinx_wdt_ops = {
.ping = xilinx_wdt_keepalive,
};
+static int xilinx_wwdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+ u32 control_status_reg;
+ u64 count;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ unsigned long clock_f = clk_get_rate(xdev->clk);
+
+ /* Calculate timeout count */
+ count = wdd->timeout * clock_f;
+ ret = clk_enable(xdev->clk);
+ if (ret) {
+ dev_err(wdd->parent, "Failed to enable clock\n");
+ return ret;
+ }
+
+ spin_lock(&xdev->spinlock);
+
+ /*
+ * Timeout count is half as there are two windows
+ * first window overflow is ignored (interrupt),
+ * reset is only generated at second window overflow
+ */
+ count = count >> 1;
+
+ /* Disable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg &= ~(XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ /* Set compare and offset registers for generic watchdog timeout */
+ iowrite32((u32)count, xdev->base + XWT_WWCMP0_OFFSET);
+ iowrite32((u32)0, xdev->base + XWT_WWCMP1_OFFSET);
+ iowrite32((u32)count, xdev->base + XWT_WWOFF_OFFSET);
+
+ /* Enable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg |= (XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Started!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_stop(struct watchdog_device *wdd)
+{
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ spin_lock(&xdev->spinlock);
+
+ /* Disable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg &= ~(XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ clk_disable(xdev->clk);
+
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Stopped!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_keepalive(struct watchdog_device *wdd)
+{
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
+
+ iowrite32(XWT_WWREF_GWRR_MASK, xdev->base + XWT_WWREF_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ return 0;
+}
+
+static int xilinx_wwdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_time)
+{
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ if (new_time < XWT_WWDT_MIN_TIMEOUT ||
+ new_time > XWT_WWDT_MAX_TIMEOUT) {
+ dev_warn(xilinx_wdt_wdd->parent,
+ "timeout value must be %d<=x<=%d, using %d\n",
+ XWT_WWDT_MIN_TIMEOUT,
+ XWT_WWDT_MAX_TIMEOUT, new_time);
+ return -EINVAL;
+ }
+
+ wdd->timeout = new_time;
+
+ return xilinx_wwdt_start(wdd);
+}
+
+static const struct watchdog_info xilinx_wwdt_ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .firmware_version = 1,
+ .identity = "xlnx_wwdt watchdog",
+};
+
+static const struct watchdog_ops xilinx_wwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = xilinx_wwdt_start,
+ .stop = xilinx_wwdt_stop,
+ .ping = xilinx_wwdt_keepalive,
+ .set_timeout = xilinx_wwdt_set_timeout,
+};
+
static u32 xwdt_selftest(struct xwdt_device *xdev)
{
int i;
@@ -156,6 +323,29 @@ static void xwdt_clk_disable_unprepare(void *data)
clk_disable_unprepare(data);
}
+static const struct xwdt_devtype_data xwdt_wdt_data = {
+ .wdttype = XWDT_WDT,
+ .xwdt_info = &xilinx_wdt_ident,
+ .xwdt_ops = &xilinx_wdt_ops,
+};
+
+static const struct xwdt_devtype_data xwdt_wwdt_data = {
+ .wdttype = XWDT_WWDT,
+ .xwdt_info = &xilinx_wwdt_ident,
+ .xwdt_ops = &xilinx_wwdt_ops,
+};
+
+static const struct of_device_id xwdt_of_match[] = {
+ { .compatible = "xlnx,xps-timebase-wdt-1.00.a",
+ .data = &xwdt_wdt_data },
+ { .compatible = "xlnx,xps-timebase-wdt-1.01.a",
+ .data = &xwdt_wdt_data },
+ { .compatible = "xlnx,versal-wwdt-1.0",
+ .data = &xwdt_wwdt_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xwdt_of_match);
+
static int xwdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -163,32 +353,49 @@ static int xwdt_probe(struct platform_device *pdev)
u32 pfreq = 0, enable_once = 0;
struct xwdt_device *xdev;
struct watchdog_device *xilinx_wdt_wdd;
+ const struct of_device_id *of_id;
+ enum xwdt_ip_type wdttype;
+ const struct xwdt_devtype_data *devtype;
xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
- xilinx_wdt_wdd->info = &xilinx_wdt_ident;
- xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
+
+ of_id = of_match_device(xwdt_of_match, &pdev->dev);
+ if (!of_id)
+ return -EINVAL;
+
+ devtype = of_id->data;
+
+ wdttype = devtype->wdttype;
+
+ xilinx_wdt_wdd->info = devtype->xwdt_info;
+ xilinx_wdt_wdd->ops = devtype->xwdt_ops;
xilinx_wdt_wdd->parent = dev;
xdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->base))
return PTR_ERR(xdev->base);
- rc = of_property_read_u32(dev->of_node, "xlnx,wdt-interval",
- &xdev->wdt_interval);
- if (rc)
- dev_warn(dev, "Parameter \"xlnx,wdt-interval\" not found\n");
+ if (wdttype == XWDT_WDT) {
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,wdt-interval",
+ &xdev->wdt_interval);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-interval\" not found\n");
- rc = of_property_read_u32(dev->of_node, "xlnx,wdt-enable-once",
- &enable_once);
- if (rc)
- dev_warn(dev,
- "Parameter \"xlnx,wdt-enable-once\" not found\n");
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "xlnx,wdt-enable-once",
+ &enable_once);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "Parameter \"xlnx,wdt-enable-once\" not found\n");
- watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+ watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+ }
xdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(xdev->clk)) {
@@ -210,13 +417,28 @@ static int xwdt_probe(struct platform_device *pdev)
pfreq = clk_get_rate(xdev->clk);
}
- /*
- * Twice of the 2^wdt_interval / freq because the first wdt overflow is
- * ignored (interrupt), reset is only generated at second wdt overflow
- */
- if (pfreq && xdev->wdt_interval)
- xilinx_wdt_wdd->timeout = 2 * ((1 << xdev->wdt_interval) /
- pfreq);
+ if (wdttype == XWDT_WDT) {
+ /*
+ * Twice of the 2^wdt_interval / freq because
+ * the first wdt overflow is ignored (interrupt),
+ * reset is only generated at second wdt overflow
+ */
+ if (pfreq && xdev->wdt_interval)
+ xilinx_wdt_wdd->timeout =
+ 2 * ((1 << xdev->wdt_interval) /
+ pfreq);
+ } else {
+ xilinx_wdt_wdd->timeout = XWT_WWDT_DEFAULT_TIMEOUT;
+ xilinx_wdt_wdd->min_timeout = XWT_WWDT_MIN_TIMEOUT;
+ xilinx_wdt_wdd->max_timeout = XWT_WWDT_MAX_TIMEOUT;
+
+ rc = watchdog_init_timeout(xilinx_wdt_wdd,
+ wdt_timeout, &pdev->dev);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to set timeout value\n");
+ return rc;
+ }
+ }
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
@@ -231,10 +453,12 @@ static int xwdt_probe(struct platform_device *pdev)
if (rc)
return rc;
- rc = xwdt_selftest(xdev);
- if (rc == XWT_TIMER_FAILED) {
- dev_err(dev, "SelfTest routine error\n");
- return rc;
+ if (wdttype == XWDT_WDT) {
+ rc = xwdt_selftest(xdev);
+ if (rc == XWT_TIMER_FAILED) {
+ dev_err(dev, "SelfTest routine error\n");
+ return rc;
+ }
}
rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
@@ -262,9 +486,10 @@ static int xwdt_probe(struct platform_device *pdev)
static int __maybe_unused xwdt_suspend(struct device *dev)
{
struct xwdt_device *xdev = dev_get_drvdata(dev);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
- if (watchdog_active(&xdev->xilinx_wdt_wdd))
- xilinx_wdt_stop(&xdev->xilinx_wdt_wdd);
+ if (watchdog_active(xilinx_wdt_wdd))
+ xilinx_wdt_wdd->ops->stop(xilinx_wdt_wdd);
return 0;
}
@@ -278,24 +503,17 @@ static int __maybe_unused xwdt_suspend(struct device *dev)
static int __maybe_unused xwdt_resume(struct device *dev)
{
struct xwdt_device *xdev = dev_get_drvdata(dev);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
int ret = 0;
- if (watchdog_active(&xdev->xilinx_wdt_wdd))
- ret = xilinx_wdt_start(&xdev->xilinx_wdt_wdd);
+ if (watchdog_active(xilinx_wdt_wdd))
+ ret = xilinx_wdt_wdd->ops->start(xilinx_wdt_wdd);
return ret;
}
static SIMPLE_DEV_PM_OPS(xwdt_pm_ops, xwdt_suspend, xwdt_resume);
-/* Match table for of_platform binding */
-static const struct of_device_id xwdt_of_match[] = {
- { .compatible = "xlnx,xps-timebase-wdt-1.00.a", },
- { .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
- {},
-};
-MODULE_DEVICE_TABLE(of, xwdt_of_match);
-
static struct platform_driver xwdt_driver = {
.probe = xwdt_probe,
.driver = {
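For the window watchdog, xilinx_wwdt_start() converts the timeout into clock cycles and halves it because only the second window overflow resets the system. A worked example of the resulting register values, assuming an illustrative 100 MHz pclk and the default 10 s timeout:

	u64 count = 10ULL * 100000000;	/* 1,000,000,000 cycles total */
	u32 window = count >> 1;	/* 500,000,000 (0x1dcd6500) per window,
					 * written to WWCMP0 and WWOFF */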
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8f66e4f4256e..19f2e7c77be8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -365,12 +365,12 @@ static void __save_error_info(struct super_block *sb, const char *func,
return;
es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
ext4_update_tstamp(es, s_last_error_time);
- strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
+ strscpy_pad(es->s_last_error_func, func, sizeof(es->s_last_error_func));
es->s_last_error_line = cpu_to_le32(line);
if (!es->s_first_error_time) {
es->s_first_error_time = es->s_last_error_time;
es->s_first_error_time_hi = es->s_last_error_time_hi;
- strncpy(es->s_first_error_func, func,
+ strscpy_pad(es->s_first_error_func, func,
sizeof(es->s_first_error_func));
es->s_first_error_line = cpu_to_le32(line);
es->s_first_error_ino = es->s_last_error_ino;
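strscpy_pad() fixes two shortcomings of strncpy() at once: it always NUL-terminates the destination, and it zero-fills the remainder of the buffer, which matters here because s_last_error_func is written to disk as-is. A small illustration with a hypothetical 8-byte buffer:

	char dst[8];

	strncpy(dst, "ext4_map_blocks", sizeof(dst));	/* fills dst, no terminating NUL */
	strscpy_pad(dst, "ext4_map_blocks", sizeof(dst));	/* truncated, terminated,
							 * remainder zeroed */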
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index c1a8d4a41bb1..a3d6d09fbb55 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -46,6 +46,8 @@ struct af_alg_type {
void *(*bind)(const char *name, u32 type, u32 mask);
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*setkeytype)(void *private, const u8 *keytype,
+ unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 26be16cc46e3..341673b478d0 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -37,6 +37,8 @@ struct skcipher_request {
struct crypto_skcipher {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
+ int (*setkeytype)(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
@@ -409,6 +411,12 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
+static inline int crypto_skcipher_setkeytype(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return tfm->setkeytype(tfm, key, keylen);
+}
+
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
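crypto_skcipher_setkeytype() mirrors crypto_skcipher_setkey() but dispatches to the new per-transform hook, so a client can select the key source on hardware ciphers that distinguish, for example, a device key from a user key. Note the hook is invoked unconditionally, so callers must know their algorithm implements it. A hedged usage sketch; the "xilinx-zynqmp-aes" name and the keytype encoding are assumptions, not defined by this header:

	struct crypto_skcipher *tfm;
	static const u8 keytype = 0;	/* assumed: 0 selects the device key */

	tfm = crypto_alloc_skcipher("xilinx-zynqmp-aes", 0, 0);	/* assumed name */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	return crypto_skcipher_setkeytype(tfm, &keytype, sizeof(keytype));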
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index a09864f6d684..4059e4486f84 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -27,6 +27,7 @@
#ifndef __DRM_ENCODER_SLAVE_H__
#define __DRM_ENCODER_SLAVE_H__
+#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
@@ -159,6 +160,29 @@ static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *dri
void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
+/**
+ * struct drm_platform_encoder_driver
+ * @platform_driver: platform device driver
+ * @encoder_init: callback to initialize the slave encoder
+ *
+ * Describes a device driver for an encoder connected
+ * through a platform bus. In addition to the entry points in @platform_driver,
+ * an @encoder_init function should be provided. It will be called to
+ * give the driver an opportunity to allocate any per-encoder data
+ * structures and to initialize the @slave_funcs and (optionally)
+ * @slave_priv members of @encoder.
+ */
+struct drm_platform_encoder_driver {
+ struct platform_driver platform_driver;
+
+ int (*encoder_init)(struct platform_device *pdev,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder);
+
+};
+#define to_drm_platform_encoder_driver(x) container_of((x), \
+ struct drm_platform_encoder_driver, \
+ platform_driver)
/*
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
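A user of this wrapper embeds its platform_driver and supplies encoder_init, and to_drm_platform_encoder_driver() recovers the wrapper from the embedded member, exactly as the i2c variant does. A hedged sketch with hypothetical names:

	static int my_encoder_init(struct platform_device *pdev,
				   struct drm_device *dev,
				   struct drm_encoder_slave *encoder)
	{
		/* allocate per-encoder state; set encoder->slave_funcs
		 * (and optionally encoder->slave_priv) here */
		return 0;
	}

	static struct drm_platform_encoder_driver my_encoder_driver = {
		.platform_driver = {
			.driver = { .name = "my-encoder" },	/* hypothetical */
		},
		.encoder_init = my_encoder_init,
	};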
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index b3d9d88ab290..4d66ba6be06d 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -122,7 +122,23 @@ struct drm_format_info {
* drm_format_info_block_height()
*/
u8 block_h[3];
-
+ /**
+ * @pixels_per_macropixel:
+ * Number of pixels per macro-pixel (per plane). A macro-pixel is
+ * composed of multiple pixels, and there can be extra bits between
+ * pixels. This must be used along with @bytes_per_macropixel, only
+ * when single pixel size is not byte-aligned. In this case, @cpp
+ * is not valid and should be 0.
+ */
+ u8 pixels_per_macropixel[3];
+ /**
+ * @bytes_per_macropixel:
+ * Number of bytes per macro-pixel (per plane). A macro-pixel is
+ * composed of multiple pixels. The size of single macro-pixel should
+ * be byte-aligned. This should be used with @pixels_per_macropixel,
+ * and @cpp should be 0.
+ */
+ u8 bytes_per_macropixel[3];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
/** @vsub: Vertical chroma subsampling factor */
@@ -280,6 +296,8 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
+int drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, int width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
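The macro-pixel fields exist because @cpp cannot describe formats whose single pixel is not byte-aligned; the per-plane pitch instead follows from how many whole macro-pixels cover the width. A plausible shape for drm_format_plane_width_bytes(), inferred from the field documentation above (the actual implementation lives in drm_fourcc.c):

	int width_bytes(const struct drm_format_info *info, int plane, int width)
	{
		if (info->pixels_per_macropixel[plane])
			return DIV_ROUND_UP(width,
					    info->pixels_per_macropixel[plane]) *
			       info->bytes_per_macropixel[plane];
		return width * info->cpp[plane];	/* byte-aligned formats */
	}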
diff --git a/include/dt-bindings/clock/xlnx-versal-clk.h b/include/dt-bindings/clock/xlnx-versal-clk.h
new file mode 100644
index 000000000000..264d634d226e
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx-versal-clk.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Xilinx Inc.
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_VERSAL_H
+#define _DT_BINDINGS_CLK_VERSAL_H
+
+#define PMC_PLL 1
+#define APU_PLL 2
+#define RPU_PLL 3
+#define CPM_PLL 4
+#define NOC_PLL 5
+#define PLL_MAX 6
+#define PMC_PRESRC 7
+#define PMC_POSTCLK 8
+#define PMC_PLL_OUT 9
+#define PPLL 10
+#define NOC_PRESRC 11
+#define NOC_POSTCLK 12
+#define NOC_PLL_OUT 13
+#define NPLL 14
+#define APU_PRESRC 15
+#define APU_POSTCLK 16
+#define APU_PLL_OUT 17
+#define APLL 18
+#define RPU_PRESRC 19
+#define RPU_POSTCLK 20
+#define RPU_PLL_OUT 21
+#define RPLL 22
+#define CPM_PRESRC 23
+#define CPM_POSTCLK 24
+#define CPM_PLL_OUT 25
+#define CPLL 26
+#define PPLL_TO_XPD 27
+#define NPLL_TO_XPD 28
+#define APLL_TO_XPD 29
+#define RPLL_TO_XPD 30
+#define EFUSE_REF 31
+#define SYSMON_REF 32
+#define IRO_SUSPEND_REF 33
+#define USB_SUSPEND 34
+#define SWITCH_TIMEOUT 35
+#define RCLK_PMC 36
+#define RCLK_LPD 37
+#define WDT 38
+#define TTC0 39
+#define TTC1 40
+#define TTC2 41
+#define TTC3 42
+#define GEM_TSU 43
+#define GEM_TSU_LB 44
+#define MUXED_IRO_DIV2 45
+#define MUXED_IRO_DIV4 46
+#define PSM_REF 47
+#define GEM0_RX 48
+#define GEM0_TX 49
+#define GEM1_RX 50
+#define GEM1_TX 51
+#define CPM_CORE_REF 52
+#define CPM_LSBUS_REF 53
+#define CPM_DBG_REF 54
+#define CPM_AUX0_REF 55
+#define CPM_AUX1_REF 56
+#define QSPI_REF 57
+#define OSPI_REF 58
+#define SDIO0_REF 59
+#define SDIO1_REF 60
+#define PMC_LSBUS_REF 61
+#define I2C_REF 62
+#define TEST_PATTERN_REF 63
+#define DFT_OSC_REF 64
+#define PMC_PL0_REF 65
+#define PMC_PL1_REF 66
+#define PMC_PL2_REF 67
+#define PMC_PL3_REF 68
+#define CFU_REF 69
+#define SPARE_REF 70
+#define NPI_REF 71
+#define HSM0_REF 72
+#define HSM1_REF 73
+#define SD_DLL_REF 74
+#define FPD_TOP_SWITCH 75
+#define FPD_LSBUS 76
+#define ACPU 77
+#define DBG_TRACE 78
+#define DBG_FPD 79
+#define LPD_TOP_SWITCH 80
+#define ADMA 81
+#define LPD_LSBUS 82
+#define CPU_R5 83
+#define CPU_R5_CORE 84
+#define CPU_R5_OCM 85
+#define CPU_R5_OCM2 86
+#define IOU_SWITCH 87
+#define GEM0_REF 88
+#define GEM1_REF 89
+#define GEM_TSU_REF 90
+#define USB0_BUS_REF 91
+#define UART0_REF 92
+#define UART1_REF 93
+#define SPI0_REF 94
+#define SPI1_REF 95
+#define CAN0_REF 96
+#define CAN1_REF 97
+#define I2C0_REF 98
+#define I2C1_REF 99
+#define DBG_LPD 100
+#define TIMESTAMP_REF 101
+#define DBG_TSTMP 102
+#define CPM_TOPSW_REF 103
+#define USB3_DUAL_REF 104
+#define OUTCLK_MAX 105
+#define REF_CLK 106
+#define PL_ALT_REF_CLK 107
+#define MUXED_IRO 108
+#define PL_EXT 109
+#define PL_LB 110
+#define MIO_50_OR_51 111
+#define MIO_24_OR_25 112
+
+#endif
diff --git a/include/dt-bindings/drm/mipi-dsi.h b/include/dt-bindings/drm/mipi-dsi.h
new file mode 100644
index 000000000000..c6f37ec661fe
--- /dev/null
+++ b/include/dt-bindings/drm/mipi-dsi.h
@@ -0,0 +1,11 @@
+#ifndef __DT_BINDINGS_DRM__
+#define __DT_BINDINGS_DRM__
+/*
+ * MIPI DSI pixel formats, as defined in include/drm/drm_mipi_dsi.h
+ */
+#define MIPI_DSI_FMT_RGB888 0
+#define MIPI_DSI_FMT_RGB666 1
+#define MIPI_DSI_FMT_RGB666_PACKED 2
+#define MIPI_DSI_FMT_RGB565 3
+
+#endif /* __DT_BINDINGS_DRM__ */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
index 94ed3edfcc70..beb50a7483bc 100644
--- a/include/dt-bindings/media/xilinx-vip.h
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -32,5 +32,11 @@
#define XVIP_VF_CUSTOM2 13
#define XVIP_VF_CUSTOM3 14
#define XVIP_VF_CUSTOM4 15
+#define XVIP_VF_VUY_422 16
+#define XVIP_VF_BGRX 17
+#define XVIP_VF_YUVX 18
+#define XVIP_VF_XBGR 19
+#define XVIP_VF_Y_GREY 20
+#define XVIP_VF_XRGB 21
#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index b6a1eaf1b339..f6bc83b66ae9 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -16,5 +16,7 @@
#define PHY_TYPE_USB2 3
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
+#define PHY_TYPE_DP 6
+#define PHY_TYPE_SGMII 7
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynqmp.h b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
new file mode 100644
index 000000000000..65522a1f032d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
@@ -0,0 +1,36 @@
+/*
+ * MIO pin configuration defines for Xilinx ZynqMP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ * Author: Chirag Parekh <chirag.parekh@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQMP_H
+#define _DT_BINDINGS_PINCTRL_ZYNQMP_H
+
+/* Bit value for IO standards */
+#define IO_STANDARD_LVCMOS33 0
+#define IO_STANDARD_LVCMOS18 1
+
+/* Bit values for Slew Rates */
+#define SLEW_RATE_FAST 0
+#define SLEW_RATE_SLOW 1
+
+/* Bit values for Pin inputs */
+#define PIN_INPUT_TYPE_CMOS 0
+#define PIN_INPUT_TYPE_SCHMITT 1
+
+/* Bit values for drive control*/
+#define DRIVE_STRENGTH_2MA 2
+#define DRIVE_STRENGTH_4MA 4
+#define DRIVE_STRENGTH_8MA 8
+#define DRIVE_STRENGTH_12MA 12
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQMP_H */
diff --git a/include/dt-bindings/power/xlnx-versal-power.h b/include/dt-bindings/power/xlnx-versal-power.h
new file mode 100644
index 000000000000..5ad85a1ef140
--- /dev/null
+++ b/include/dt-bindings/power/xlnx-versal-power.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_VERSAL_POWER_H
+#define _DT_BINDINGS_VERSAL_POWER_H
+
+#define PD_SWDT_FPD 0x18224029
+#define PD_USB_0 0x18224018
+#define PD_GEM_0 0x18224019
+#define PD_GEM_1 0x1822401a
+#define PD_SPI_0 0x1822401b
+#define PD_SPI_1 0x1822401c
+#define PD_I2C_0 0x1822401d
+#define PD_I2C_1 0x1822401e
+#define PD_CAN_FD_0 0x1822401f
+#define PD_CAN_FD_1 0x18224020
+#define PD_UART_0 0x18224021
+#define PD_UART_1 0x18224022
+#define PD_GPIO 0x18224023
+#define PD_TTC_0 0x18224024
+#define PD_TTC_1 0x18224025
+#define PD_TTC_2 0x18224026
+#define PD_TTC_3 0x18224027
+#define PD_OSPI 0x1822402a
+#define PD_QSPI 0x1822402b
+#define PD_SDIO_0 0x1822402e
+#define PD_SDIO_1 0x1822402f
+#define PD_RTC 0x18224034
+#define PD_ADMA_0 0x18224035
+#define PD_ADMA_1 0x18224036
+#define PD_ADMA_2 0x18224037
+#define PD_ADMA_3 0x18224038
+#define PD_ADMA_4 0x18224039
+#define PD_ADMA_5 0x1822403a
+#define PD_ADMA_6 0x1822403b
+#define PD_ADMA_7 0x1822403c
+
+#endif
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index bb6118f79784..f6525ba73eef 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -819,6 +819,7 @@ unsigned long clk_hw_get_flags(const struct clk_hw *hw);
#define clk_hw_can_set_rate_parent(hw) \
(clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
+unsigned int clk_get_children(char *name);
bool clk_hw_is_prepared(const struct clk_hw *hw);
bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h
index a198dd9255a4..d1135756aedf 100644
--- a/include/linux/clk/zynq.h
+++ b/include/linux/clk/zynq.h
@@ -9,6 +9,10 @@
#include <linux/spinlock.h>
+int zynq_clk_suspend_early(void);
+void zynq_clk_resume_late(void);
+void zynq_clk_topswitch_enable(void);
+void zynq_clk_topswitch_disable(void);
void zynq_clock_init(void);
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 9cf8f3ce0e50..b630c97ac705 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -255,6 +255,8 @@ struct ablkcipher_alg {
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
+ int (*setkeytype)(struct crypto_tfm *tfm, const u8 *keytype,
+ unsigned int keylen);
int (*encrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
@@ -721,6 +723,8 @@ struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
+ int (*setkeytype)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
@@ -1453,6 +1457,14 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
key, keylen);
}
+static inline int crypto_blkcipher_setkeytype(struct crypto_blkcipher *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto_blkcipher_crt(tfm)->setkeytype(crypto_blkcipher_tfm(tfm),
+ key, keylen);
+}
+
/**
* crypto_blkcipher_encrypt() - encrypt plaintext
* @desc: reference to the block cipher handle with meta data
diff --git a/include/linux/dma/xilinx_frmbuf.h b/include/linux/dma/xilinx_frmbuf.h
new file mode 100644
index 000000000000..a62b653ff87f
--- /dev/null
+++ b/include/linux/dma/xilinx_frmbuf.h
@@ -0,0 +1,204 @@
+/*
+ * Xilinx Framebuffer DMA support header file
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __XILINX_FRMBUF_DMA_H
+#define __XILINX_FRMBUF_DMA_H
+
+#include <linux/dmaengine.h>
+
+/* Modes to enable early callback */
+#define EARLY_CALLBACK BIT(1) /* To avoid first frame delay */
+#define EARLY_CALLBACK_LOW_LATENCY BIT(2) /* Low latency mode */
+
+/**
+ * enum vid_frmwork_type - Linux video framework type
+ * @XDMA_DRM: fourcc is of type DRM
+ * @XDMA_V4L2: fourcc is of type V4L2
+ */
+enum vid_frmwork_type {
+ XDMA_DRM = 0,
+ XDMA_V4L2,
+};
+
+/**
+ * enum operation_mode - FB IP control register field settings to select mode
+ * @DEFAULT: Use default mode; no explicit bit field settings required.
+ * @AUTO_RESTART: Use auto-restart mode by setting BIT(7) of the control register.
+ */
+enum operation_mode {
+ DEFAULT = 0x0,
+ AUTO_RESTART = BIT(7),
+};
+
+#if IS_ENABLED(CONFIG_XILINX_FRMBUF)
+/**
+ * xilinx_xdma_set_mode - Set operation mode for framebuffer IP
+ * @chan: dma channel instance
+ * @mode: Framebuffer IP operation mode.
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending(). This routine should be
+ * called by the client driver to set the operation mode of the framebuffer
+ * IP based on the use-case: for non-streaming use-cases (like MEM2MEM) the
+ * default mode is appropriate, while streaming use-cases are better served
+ * by the auto-restart (free-running) mode.
+ */
+void xilinx_xdma_set_mode(struct dma_chan *chan, enum operation_mode mode);
+
+/**
+ * xilinx_xdma_drm_config - configure video format in video aware DMA
+ * @chan: dma channel instance
+ * @drm_fourcc: DRM fourcc code describing the memory layout of video data
+ *
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending() to establish the video
+ * data memory format within the hardware DMA.
+ */
+void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc);
+
+/**
+ * xilinx_xdma_v4l2_config - configure video format in video aware DMA
+ * @chan: dma channel instance
+ * @v4l2_fourcc: V4L2 fourcc code describing the memory layout of video data
+ *
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending() to establish the video
+ * data memory format within the hardware DMA.
+ */
+void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc);
+
+/**
+ * xilinx_xdma_get_drm_vid_fmts - obtain list of supported DRM mem formats
+ * @chan: dma channel instance
+ * @fmt_cnt: Output param - total count of supported DRM fourcc codes
+ * @fmts: Output param - pointer to array of DRM fourcc codes (not a copy)
+ *
+ * Return: a reference to an array of DRM fourcc codes supported by this
+ * instance of the Video Framebuffer Driver
+ */
+int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts);
+
+/**
+ * xilinx_xdma_get_v4l2_vid_fmts - obtain list of supported V4L2 mem formats
+ * @chan: dma channel instance
+ * @fmt_cnt: Output param - total count of supported V4L2 fourcc codes
+ * @fmts: Output param - pointer to array of V4L2 fourcc codes (not a copy)
+ *
+ * Return: a reference to an array of V4L2 fourcc codes supported by this
+ * instance of the Video Framebuffer Driver
+ */
+int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts);
+
+/**
+ * xilinx_xdma_get_fid - Get the Field ID of the buffer received.
+ * This function should be called from the callback function registered
+ * per descriptor in prep_interleaved.
+ *
+ * @chan: dma channel instance
+ * @async_tx: descriptor whose parent structure contains fid.
+ * @fid: Output param - Field ID of the buffer. 0 - even, 1 - odd.
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 *fid);
+
+/**
+ * xilinx_xdma_set_fid - Set the Field ID of the buffer to be transmitted
+ * @chan: dma channel instance
+ * @async_tx: dma async tx descriptor for the buffer
+ * @fid: Field ID of the buffer. 0 - even, 1 - odd.
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 fid);
+
+/**
+ * xilinx_xdma_get_earlycb - Query whether early callback has been enabled.
+ *
+ * @chan: dma channel instance
+ * @async_tx: descriptor whose parent structure contains fid.
+ * @earlycb: Output param - Early callback mode
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *earlycb);
+
+/**
+ * xilinx_xdma_set_earlycb - Enable/Disable early callback
+ * @chan: dma channel instance
+ * @async_tx: dma async tx descriptor for the buffer
+ * @earlycb: Enable early callback mode for descriptor
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 earlycb);
+#else
+static inline void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
+{ }
+
+static inline void xilinx_xdma_v4l2_config(struct dma_chan *chan,
+ u32 v4l2_fourcc)
+{ }
+
+static inline int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan,
+ u32 *fmt_cnt,
+ u32 **fmts)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan,
+ u32 *fmt_cnt,
+ u32 **fmts)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *fid)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 fid)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *atx,
+ u32 *earlycb)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *atx,
+ u32 earlycb)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /*__XILINX_FRMBUF_DMA_H*/
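Taken together, the kernel-doc above fixes a call order for framebuffer DMA clients: select the fourcc and operation mode first, then prepare, submit and issue. A condensed sketch of that order (error handling elided; DRM_FORMAT_YUYV is only an example format):

	struct dma_chan *chan;			/* from dma_request_chan() */
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template xt;	/* filled in by the client */

	xilinx_xdma_drm_config(chan, DRM_FORMAT_YUYV);	/* memory layout */
	xilinx_xdma_set_mode(chan, AUTO_RESTART);	/* streaming use-case */
	desc = dmaengine_prep_interleaved_dma(chan, &xt, DMA_PREP_INTERRUPT);
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* only after configuration */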
diff --git a/include/linux/dma/xilinx_ps_pcie_dma.h b/include/linux/dma/xilinx_ps_pcie_dma.h
new file mode 100644
index 000000000000..7c9912bd490e
--- /dev/null
+++ b/include/linux/dma/xilinx_ps_pcie_dma.h
@@ -0,0 +1,69 @@
+/*
+ * Xilinx PS PCIe DMA Engine support header file
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __DMA_XILINX_PS_PCIE_H
+#define __DMA_XILINX_PS_PCIE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define XLNX_PLATFORM_DRIVER_NAME "xlnx-platform-dma-driver"
+
+#define ZYNQMP_DMA_DEVID (0xD024)
+#define ZYNQMP_RC_DMA_DEVID (0xD021)
+
+#define MAX_ALLOWED_CHANNELS_IN_HW 4
+
+#define MAX_NUMBER_OF_CHANNELS MAX_ALLOWED_CHANNELS_IN_HW
+
+#define DEFAULT_DMA_QUEUES 4
+#define TWO_DMA_QUEUES 2
+
+#define NUMBER_OF_BUFFER_DESCRIPTORS 1999
+#define MAX_DESCRIPTORS 65536
+
+#define CHANNEL_COAELSE_COUNT 0
+
+#define CHANNEL_POLL_TIMER_FREQUENCY 1000 /* in milliseconds */
+
+#define PCIE_AXI_DIRECTION DMA_TO_DEVICE
+#define AXI_PCIE_DIRECTION DMA_FROM_DEVICE
+
+/**
+ * struct BAR_PARAMS - PCIe Bar Parameters
+ * @BAR_PHYS_ADDR: PCIe BAR Physical address
+ * @BAR_LENGTH: Length of PCIe BAR
+ * @BAR_VIRT_ADDR: Virtual Address to access PCIe BAR
+ */
+struct BAR_PARAMS {
+ dma_addr_t BAR_PHYS_ADDR; /**< Base physical address of BAR memory */
+ unsigned long BAR_LENGTH; /**< Length of BAR memory window */
+ void *BAR_VIRT_ADDR; /**< Virtual Address of mapped BAR memory */
+};
+
+/**
+ * struct ps_pcie_dma_channel_match - Match structure for dma clients
+ * @pci_vendorid: PCIe Vendor id of PS PCIe DMA device
+ * @pci_deviceid: PCIe Device id of PS PCIe DMA device
+ * @board_number: Unique id to identify individual device in a system
+ * @channel_number: Unique channel number of the device
+ * @direction: DMA channel direction
+ * @bar_params: Pointer to BAR_PARAMS for accessing application specific data
+ */
+struct ps_pcie_dma_channel_match {
+ u16 pci_vendorid;
+ u16 pci_deviceid;
+ u16 board_number;
+ u16 channel_number;
+ enum dma_data_direction direction;
+ struct BAR_PARAMS *bar_params;
+};
+
+#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index fa3e8f91b3f5..a187671f695d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -56,6 +56,7 @@ enum dma_transaction_type {
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
+ DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
@@ -775,6 +776,11 @@ struct dma_device {
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -904,6 +910,19 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+ return NULL;
+
+ return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents, flags);
+}
+
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 1262ea6a1f4b..578c28ac5d20 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -13,6 +13,8 @@
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
+#include <linux/device.h>
+
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -27,20 +29,35 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* ATF only commands */
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+
/* Number of 32bits values in payload */
#define PAYLOAD_ARG_CNT 4U
/* Number of arguments for a callback */
-#define CB_ARG_CNT 4
+#define CB_ARG_CNT 4
/* Payload size (consists of callback API ID + arguments) */
-#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
+#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
-#define ZYNQMP_PM_MAX_QOS 100U
+#define ZYNQMP_PM_MAX_LATENCY (~0U)
+#define ZYNQMP_PM_MAX_QOS 100U
+
+/* Usage status, returned by PmGetNodeStatus */
+#define PM_USAGE_NO_MASTER 0x0U
+#define PM_USAGE_CURRENT_MASTER 0x1U
+#define PM_USAGE_OTHER_MASTER 0x2U
+#define PM_USAGE_BOTH_MASTERS (PM_USAGE_CURRENT_MASTER | \
+ PM_USAGE_OTHER_MASTER)
+
+#define GSS_NUM_REGS (4)
/* Node capabilities */
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
@@ -56,19 +73,54 @@
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+/* Feature check status */
+#define PM_FEATURE_INVALID -1
+#define PM_FEATURE_UNCHECKED 0
+
enum pm_api_id {
PM_GET_API_VERSION = 1,
- PM_REQUEST_NODE = 13,
+ PM_SET_CONFIGURATION,
+ PM_GET_NODE_STATUS,
+ PM_GET_OPERATING_CHARACTERISTIC,
+ PM_REGISTER_NOTIFIER,
+ /* API for suspending of PUs: */
+ PM_REQUEST_SUSPEND,
+ PM_SELF_SUSPEND,
+ PM_FORCE_POWERDOWN,
+ PM_ABORT_SUSPEND,
+ PM_REQUEST_WAKEUP,
+ PM_SET_WAKEUP_SOURCE,
+ PM_SYSTEM_SHUTDOWN,
+ /* API for managing PM slaves: */
+ PM_REQUEST_NODE,
PM_RELEASE_NODE,
PM_SET_REQUIREMENT,
- PM_RESET_ASSERT = 17,
+ PM_SET_MAX_LATENCY,
+ /* Direct control API functions: */
+ PM_RESET_ASSERT,
PM_RESET_GET_STATUS,
- PM_PM_INIT_FINALIZE = 21,
+ PM_MMIO_WRITE,
+ PM_MMIO_READ,
+ PM_PM_INIT_FINALIZE,
PM_FPGA_LOAD,
PM_FPGA_GET_STATUS,
PM_GET_CHIPID = 24,
- PM_IOCTL = 34,
+ /* ID 25 is used by U-Boot to process secure boot images */
+ /* Secure library generic API functions */
+ PM_SECURE_SHA = 26,
+ PM_SECURE_RSA,
+ /* Pin control API functions */
+ PM_PINCTRL_REQUEST,
+ PM_PINCTRL_RELEASE,
+ PM_PINCTRL_GET_FUNCTION,
+ PM_PINCTRL_SET_FUNCTION,
+ PM_PINCTRL_CONFIG_PARAM_GET,
+ PM_PINCTRL_CONFIG_PARAM_SET,
+ /* PM IOCTL API */
+ PM_IOCTL,
+ /* API to query information from firmware */
PM_QUERY_DATA,
+ /* Clock control API functions */
PM_CLOCK_ENABLE,
PM_CLOCK_DISABLE,
PM_CLOCK_GETSTATE,
@@ -78,24 +130,52 @@ enum pm_api_id {
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
+ PM_SECURE_IMAGE,
+ PM_FPGA_READ = 46,
+ PM_SECURE_AES,
+ /* PM_REGISTER_ACCESS API */
+ PM_REGISTER_ACCESS = 52,
+ PM_EFUSE_ACCESS,
+ PM_FEATURE_CHECK = 63,
+ PM_API_MAX,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_NO_FEATURE = 19,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT,
XST_PM_NO_ACCESS,
XST_PM_INVALID_NODE,
XST_PM_DOUBLE_REQ,
XST_PM_ABORT_SUSPEND,
+ XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
- IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_RPU_OPER_MODE,
+ IOCTL_SET_RPU_OPER_MODE,
+ IOCTL_RPU_BOOT_ADDR_CONFIG,
+ IOCTL_TCM_COMB_CONFIG,
+ IOCTL_SET_TAPDELAY_BYPASS,
+ IOCTL_SET_SGMII_MODE,
+ IOCTL_SD_DLL_RESET,
+ IOCTL_SET_SD_TAPDELAY,
+ /* Ioctl for clock driver */
+ IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_WRITE_GGS,
+ IOCTL_READ_GGS,
+ IOCTL_WRITE_PGGS,
+ IOCTL_READ_PGGS,
+ /* IOCTL for ULPI reset */
+ IOCTL_ULPI_RESET,
+ /* Set healthy bit value*/
+ IOCTL_SET_BOOT_HEALTH_STATUS,
+ IOCTL_AFI,
};
enum pm_query_id {
@@ -105,7 +185,14 @@ enum pm_query_id {
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
- PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+ PM_QID_PINCTRL_GET_NUM_PINS,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
+ PM_QID_PINCTRL_GET_PIN_GROUPS,
+ PM_QID_CLOCK_GET_NUM_CLOCKS,
+ PM_QID_CLOCK_GET_MAX_DIVISOR,
};
enum zynqmp_pm_reset_action {
@@ -239,6 +326,13 @@ enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
+enum zynqmp_pm_abort_reason {
+ ZYNQMP_PM_ABORT_REASON_WAKEUP_EVENT = 100,
+ ZYNQMP_PM_ABORT_REASON_POWER_UNIT_BUSY,
+ ZYNQMP_PM_ABORT_REASON_NO_POWERDOWN,
+ ZYNQMP_PM_ABORT_REASON_UNKNOWN,
+};
+
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
SUSPEND_ALERT,
@@ -251,6 +345,191 @@ enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
};
+enum zynqmp_pm_opchar_type {
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_POWER = 1,
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_ENERGY,
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_TEMPERATURE,
+};
+
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
+enum pm_node_id {
+ NODE_UNKNOWN = 0,
+ NODE_APU,
+ NODE_APU_0,
+ NODE_APU_1,
+ NODE_APU_2,
+ NODE_APU_3,
+ NODE_RPU,
+ NODE_RPU_0,
+ NODE_RPU_1,
+ NODE_PLD,
+ NODE_FPD,
+ NODE_OCM_BANK_0,
+ NODE_OCM_BANK_1,
+ NODE_OCM_BANK_2,
+ NODE_OCM_BANK_3,
+ NODE_TCM_0_A,
+ NODE_TCM_0_B,
+ NODE_TCM_1_A,
+ NODE_TCM_1_B,
+ NODE_L2,
+ NODE_GPU_PP_0,
+ NODE_GPU_PP_1,
+ NODE_USB_0,
+ NODE_USB_1,
+ NODE_TTC_0,
+ NODE_TTC_1,
+ NODE_TTC_2,
+ NODE_TTC_3,
+ NODE_SATA,
+ NODE_ETH_0,
+ NODE_ETH_1,
+ NODE_ETH_2,
+ NODE_ETH_3,
+ NODE_UART_0,
+ NODE_UART_1,
+ NODE_SPI_0,
+ NODE_SPI_1,
+ NODE_I2C_0,
+ NODE_I2C_1,
+ NODE_SD_0,
+ NODE_SD_1,
+ NODE_DP,
+ NODE_GDMA,
+ NODE_ADMA,
+ NODE_NAND,
+ NODE_QSPI,
+ NODE_GPIO,
+ NODE_CAN_0,
+ NODE_CAN_1,
+ NODE_EXTERN,
+ NODE_APLL,
+ NODE_VPLL,
+ NODE_DPLL,
+ NODE_RPLL,
+ NODE_IOPLL,
+ NODE_DDR,
+ NODE_IPI_APU,
+ NODE_IPI_RPU_0,
+ NODE_GPU,
+ NODE_PCIE,
+ NODE_PCAP,
+ NODE_RTC,
+ NODE_LPD,
+ NODE_VCU,
+ NODE_IPI_RPU_1,
+ NODE_IPI_PL_0,
+ NODE_IPI_PL_1,
+ NODE_IPI_PL_2,
+ NODE_IPI_PL_3,
+ NODE_PL,
+ NODE_GEM_TSU,
+ NODE_SWDT_0,
+ NODE_SWDT_1,
+ NODE_CSU,
+ NODE_PJTAG,
+ NODE_TRACE,
+ NODE_TESTSCAN,
+ NODE_PMU,
+ NODE_MAX,
+};
+
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ PM_PINCTRL_CONFIG_MAX,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST,
+ PM_PINCTRL_SLEW_RATE_SLOW,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE,
+ PM_PINCTRL_BIAS_ENABLE,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN,
+ PM_PINCTRL_BIAS_PULL_UP,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP,
+ PM_RPU_MODE_SPLIT,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC,
+ PM_RPU_BOOTMEM_HIVEC,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT,
+ PM_RPU_TCM_COMB,
+};
+
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN,
+ PM_TAPDELAY_NAND_DQS_OUT,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_MAX,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE,
+ PM_TAPDELAY_BYPASS_ENABLE,
+};
+
+enum sgmii_mode {
+ PM_SGMII_DISABLE,
+ PM_SGMII_ENABLE,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT,
+ PM_TAPDELAY_OUTPUT,
+};
+
+enum dll_reset_type {
+ PM_DLL_RESET_ASSERT,
+ PM_DLL_RESET_RELEASE,
+ PM_DLL_RESET_PULSE,
+};
+
+enum pm_register_access_id {
+ CONFIG_REG_WRITE,
+ CONFIG_REG_READ,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -270,6 +549,8 @@ struct zynqmp_eemi_ops {
int (*get_chipid)(u32 *idcode, u32 *version);
int (*fpga_load)(const u64 address, const u32 size, const u32 flags);
int (*fpga_get_status)(u32 *value);
+ int (*fpga_read)(const u32 reg_numframes, const u64 phys_address,
+ u32 readback_type, u32 *value);
int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
int (*clock_enable)(u32 clock_id);
int (*clock_disable)(u32 clock_id);
@@ -295,11 +576,48 @@ struct zynqmp_eemi_ops {
const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
+ int (*sha_hash)(const u64 address, const u32 size, const u32 flags);
+ int (*rsa)(const u64 address, const u32 size, const u32 flags);
+ int (*request_suspend)(const u32 node,
+ const enum zynqmp_pm_request_ack ack,
+ const u32 latency,
+ const u32 state);
+ int (*force_powerdown)(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+ int (*request_wakeup)(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+ int (*set_wakeup_source)(const u32 target,
+ const u32 wakeup_node,
+ const u32 enable);
+ int (*system_shutdown)(const u32 type, const u32 subtype);
+ int (*set_max_latency)(const u32 node, const u32 latency);
+ int (*set_configuration)(const u32 physical_addr);
+ int (*get_node_status)(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage);
+ int (*get_operating_characteristic)(const u32 node,
+ const enum zynqmp_pm_opchar_type
+ type, u32 *const result);
+ int (*pinctrl_request)(const u32 pin);
+ int (*pinctrl_release)(const u32 pin);
+ int (*pinctrl_get_function)(const u32 pin, u32 *id);
+ int (*pinctrl_set_function)(const u32 pin, const u32 id);
+ int (*pinctrl_get_config)(const u32 pin, const u32 param, u32 *value);
+ int (*pinctrl_set_config)(const u32 pin, const u32 param, u32 value);
+ int (*register_access)(u32 register_access_id, u32 address,
+ u32 mask, u32 value, u32 *out);
+ int (*aes)(const u64 address, u32 *out);
+ int (*efuse_access)(const u64 address, u32 *out);
+ int (*secure_image)(const u64 src_addr, u64 key_addr, u64 *dst);
+ int (*pdi_load)(const u32 src, const u64 address);
};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_ggs_init(struct kobject *parent_kobj);
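All of these services are reached through the ops table rather than by issuing SMCs directly; a hedged sketch of the intended client pattern (get_chipid chosen only as an example):

	const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();
	u32 idcode, version;

	if (IS_ERR_OR_NULL(ops) || !ops->get_chipid)
		return -ENODEV;
	return ops->get_chipid(&idcode, &version);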
+
#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5b..985ede34330b 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -11,6 +11,9 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#define ENCRYPTED_KEY_LEN 64 /* Bytes */
+#define ENCRYPTED_IV_LEN 24 /* Bytes */
+
struct fpga_manager;
struct sg_table;
@@ -73,7 +76,6 @@ enum fpga_mgr_states {
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
-
/**
* struct fpga_image_info - information specific to a FPGA image
* @flags: boolean flags as defined above
@@ -82,6 +84,8 @@ enum fpga_mgr_states {
* @config_complete_timeout_us: maximum time for FPGA to switch to operating
* status in the write_complete op.
* @firmware_name: name of FPGA image firmware file
+ * @key: user key used when loading an encrypted bitstream
+ * @iv: initialization vector used when loading an encrypted bitstream
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
@@ -95,6 +99,8 @@ struct fpga_image_info {
u32 disable_timeout_us;
u32 config_complete_timeout_us;
char *firmware_name;
+ char key[ENCRYPTED_KEY_LEN];
+ char iv[ENCRYPTED_IV_LEN];
struct sg_table *sgt;
const char *buf;
size_t count;
@@ -114,6 +120,7 @@ struct fpga_image_info {
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
+ * @read: optional: read FPGA configuration information
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*
@@ -132,6 +139,7 @@ struct fpga_manager_ops {
int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
+ int (*read)(struct fpga_manager *mgr, struct seq_file *s);
void (*fpga_remove)(struct fpga_manager *mgr);
const struct attribute_group **groups;
};
@@ -157,21 +165,31 @@ struct fpga_compat_id {
/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
+ * @flags: flags that describe the type of bitstream
+ * @key: user key used when loading an encrypted bitstream
+ * @iv: initialization vector used when loading an encrypted bitstream
* @dev: fpga manager device
* @ref_mutex: only allows one reference to fpga manager
* @state: state of fpga manager
* @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
* @priv: low level driver private data
+ * @dir: debugfs image directory
*/
struct fpga_manager {
const char *name;
+ long int flags;
+ char key[ENCRYPTED_KEY_LEN];
+ char iv[ENCRYPTED_IV_LEN];
struct device dev;
struct mutex ref_mutex;
enum fpga_mgr_states state;
struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
void *priv;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ struct dentry *dir;
+#endif
};
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
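With the new fields, a caller loading an encrypted bitstream supplies the user key and IV in fpga_image_info alongside the existing flag bit. A hedged sketch (buffer names and image file are illustrative):

	struct fpga_image_info *info = fpga_image_info_alloc(dev);

	info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
	memcpy(info->key, user_key, ENCRYPTED_KEY_LEN);	/* AES user key */
	memcpy(info->iv, user_iv, ENCRYPTED_IV_LEN);	/* initialization vector */
	info->firmware_name = "design.bin";		/* example image */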
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 316087da1d09..8013314c82f9 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -134,6 +134,9 @@ struct gic_chip_data;
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
int gic_cpu_if_down(unsigned int gic_nr);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
+
+void gic_set_cpu(unsigned int cpu, unsigned int irq);
void gic_cpu_save(struct gic_chip_data *gic);
void gic_cpu_restore(struct gic_chip_data *gic);
void gic_dist_save(struct gic_chip_data *gic);
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 9542b41eacfd..d9f0a7471b51 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -3,6 +3,9 @@
#ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_
#define _LINUX_ZYNQMP_IPI_MESSAGE_H_
+/* IPI buffer MAX length */
+#define IPI_BUF_LEN_MAX 32U
+
/**
* struct zynqmp_ipi_message - ZynqMP IPI message structure
* @len: Length of message
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 936a3fdb48b5..0341b38c152d 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -72,7 +72,8 @@ struct mtd_oob_ops {
};
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 1260
+
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..0837ab74b2e7 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -161,8 +161,10 @@ struct onfi_ext_param_page {
* @tR: Page read time
* @tCCS: Change column setup time
* @async_timing_mode: Supported asynchronous timing mode
+ * @src_sync_timing_mode: Supported synchronous timing mode
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
+ * @jedec_id: JEDEC ID of the NAND flash device
*/
struct onfi_params {
int version;
@@ -171,8 +173,10 @@ struct onfi_params {
u16 tR;
u16 tCCS;
u16 async_timing_mode;
+ u16 src_sync_timing_mode;
u16 vendor_revision;
u8 vendor[88];
+ u8 jedec_id;
};
#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index ac3884a28dea..08437edb1a42 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -438,6 +438,7 @@ struct nand_ecc_ctrl {
* @tWHR_min: WE# high to RE# low
* @tWP_min: WE# pulse width
* @tWW_min: WP# transition to WE# low
+ * @mode: sdr timing mode value
*/
struct nand_sdr_timings {
u64 tBERS_max;
@@ -478,6 +479,7 @@ struct nand_sdr_timings {
u32 tWHR_min;
u32 tWP_min;
u32 tWW_min;
+ u8 mode;
};
/**
@@ -1369,4 +1371,13 @@ static inline void *nand_get_data_buf(struct nand_chip *chip)
return chip->data_buf;
}
+/* return the supported synchronous timing mode. */
+static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->parameters.onfi)
+ return ONFI_TIMING_MODE_UNKNOWN;
+
+ return chip->parameters.onfi->src_sync_timing_mode;
+}
+
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9f57cdfcc93d..2856030ff94a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -25,6 +25,7 @@
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */
+#define SNOR_MFR_ISSI CFI_MFR_PMC
/*
* Note on opcode nomenclature: some opcodes have a format like
@@ -98,6 +99,8 @@
#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
+#define GLOBAL_BLKPROT_UNLK 0x98 /* Clear global write protection bits */
+
/* Used for S3AN flashes only */
#define SPINOR_OP_XSE 0x50 /* Sector erase */
#define SPINOR_OP_XPP 0x82 /* Page program */
@@ -106,13 +109,13 @@
#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
#define XSR_RDY BIT(7) /* Ready */
-
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+#define SPINOR_OP_BRRD 0x16 /* Bank register read */
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
@@ -126,8 +129,17 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP_BIT_OFFSET 2 /* Offset to Block protect 0 */
+#define SR_BP_BIT_MASK (SR_BP2 | SR_BP1 | SR_BP0)
+
#define SR_TB BIT(5) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
+#define SR_BP3 0x40
+/* Bit to determine whether protection starts from top or bottom */
+#define SR_BP_TB 0x20
+#define BP_BITS_FROM_SR(sr) (((sr) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET)
+#define M25P_MAX_LOCKABLE_SECTORS 64
+
/* Spansion/Cypress specific status bits */
#define SR_E_ERR BIT(5)
#define SR_P_ERR BIT(6)
@@ -146,6 +158,16 @@
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
+/* Extended/Bank Address Register bits */
+#define EAR_SEGMENT_MASK 0x7 /* 128 Mb segment mask */
+
+enum read_mode {
+ SPI_NOR_NORMAL = 0,
+ SPI_NOR_FAST,
+ SPI_NOR_DUAL,
+ SPI_NOR_QUAD,
+};
+
/* Status Register 2 bits. */
#define SR2_QUAD_EN_BIT7 BIT(7)
@@ -382,6 +404,7 @@ struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
struct device *dev;
+ struct spi_device *spi;
const struct flash_info *info;
u32 page_size;
u8 addr_width;
@@ -389,13 +412,22 @@ struct spi_nor {
u8 read_opcode;
u8 read_dummy;
u8 program_opcode;
+ enum read_mode flash_read;
+ u32 jedec_id;
+ u16 curbank;
+ u16 n_sectors;
+ u32 sector_size;
enum spi_nor_protocol read_proto;
enum spi_nor_protocol write_proto;
enum spi_nor_protocol reg_proto;
bool sst_write_second;
+ bool shift;
+ bool isparallel;
+ bool isstacked;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
struct spi_nor_erase_map erase_map;
+ bool is_lock;
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -536,6 +568,15 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
/**
+ * spi_nor_shutdown() - prepare for reboot
+ * @nor: the spi_nor structure
+ *
+ * Drivers can use this function to reset the flash address pointer
+ * back to 0, as required for a ROM boot.
+ */
+void spi_nor_shutdown(struct spi_nor *nor);
+
+/**
* spi_nor_restore_addr_mode() - restore the status of SPI NOR
* @nor: the spi_nor structure
*/
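The block-protect additions are easiest to see in use. A minimal decode sketch, assuming the status register byte has already been read back from the flash; the helper is illustrative only:

#include <linux/bits.h>
#include <linux/mtd/spi-nor.h>

/* Hypothetical decode of the block-protect level from a status
 * register value. */
static unsigned int example_bp_level(u8 sr)
{
	unsigned int bp = BP_BITS_FROM_SR(sr);

	/* SR_BP3 sits outside the classic BP2..BP0 field on larger
	 * parts; fold it in as bit 3 of the level. */
	if (sr & SR_BP3)
		bp |= BIT(3);

	return bp;	/* 0 = nothing protected, larger = more sectors */
}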
diff --git a/include/linux/of.h b/include/linux/of.h
index 0cf857012f11..682295399fbb 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1346,6 +1346,8 @@ enum of_reconfig_change {
};
#ifdef CONFIG_OF_DYNAMIC
+#include <linux/slab.h>
+
extern int of_reconfig_notifier_register(struct notifier_block *);
extern int of_reconfig_notifier_unregister(struct notifier_block *);
extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
@@ -1389,6 +1391,23 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+
+struct device_node *of_changeset_create_device_nodev(
+ struct of_changeset *ocs, struct device_node *parent,
+ const char *fmt, va_list vargs);
+
+__printf(3, 4) struct device_node *
+of_changeset_create_device_node(struct of_changeset *ocs,
+ struct device_node *parent, const char *fmt, ...);
+
+int __of_changeset_add_update_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name, const void *value,
+ int length, bool update);
+
+int __of_changeset_add_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count, bool update);
+
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1408,8 +1427,322 @@ static inline int of_reconfig_get_state_change(unsigned long action,
{
return -EINVAL;
}
+
+static inline struct device_node *of_changeset_create_device_nodev(
+ struct of_changeset *ocs, struct device_node *parent,
+ const char *fmt, va_list vargs)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline __printf(3, 4) struct device_node *
+of_changeset_create_device_node(struct of_changeset *ocs,
+ struct device_node *parent, const char *fmt, ...)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int __of_changeset_add_update_property_copy(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const void *value, int length, bool update)
+{
+ return -EINVAL;
+}
+
+static inline __printf(4, 5) int of_changeset_add_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ return -EINVAL;
+}
+
+static inline int of_changeset_update_property_stringf(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *fmt, ...)
+{
+ return -EINVAL;
+}
+
+static inline int __of_changeset_add_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count, bool update)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_OF_DYNAMIC */
+#ifdef CONFIG_OF_DYNAMIC
+/**
+ * of_changeset_add_property_copy - Create a new property copying name & value
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @value: pointer to the value data
+ * @length: length of the value in bytes
+ *
+ * Adds a property to the changeset by making copies of the name & value
+ * entries.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name,
+ const void *value, int length)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, value,
+ length, false);
+}
+
+/**
+ * of_changeset_update_property_copy - Update a property copying name & value
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @value: pointer to the value data
+ * @length: length of the value in bytes
+ *
+ * Updates a property in the changeset by making copies of the name & value
+ * entries.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_copy(struct of_changeset *ocs,
+ struct device_node *np, const char *name,
+ const void *value, int length)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, value,
+ length, true);
+}
+
+/**
+ * __of_changeset_add_update_property_string - Create/update a string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @str: string property value
+ * @update: True on update operation
+ *
+ * Adds/updates a string property to the changeset by making copies of the name
+ * and the given value. The @update parameter controls whether an add or
+ * update takes place.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int __of_changeset_add_update_property_string(
+ struct of_changeset *ocs, struct device_node *np, const char *name,
+ const char *str, bool update)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, str,
+ strlen(str) + 1, update);
+}
+
+/**
+ * __of_changeset_add_update_property_stringv - Create/update a formatted
+ * string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @fmt: format of string property
+ * @vargs: arguments of the format string
+ * @update: True on update operation
+ *
+ * Adds/updates a string property to the changeset by making copies of the name
+ * and the formatted value. The @update parameter controls whether an add or
+ * update takes place.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int __of_changeset_add_update_property_stringv(
+ struct of_changeset *ocs, struct device_node *np, const char *name,
+ const char *fmt, va_list vargs, bool update)
+{
+ char *str;
+ int ret;
+
+ str = kvasprintf(GFP_KERNEL, fmt, vargs);
+ if (!str)
+ return -ENOMEM;
+ ret = __of_changeset_add_update_property_string(ocs, np, name, str,
+ update);
+ kfree(str);
+
+ return ret;
+}
+
+/**
+ * of_changeset_add_property_string_list - Create a new string list property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @strs: pointer to the string list
+ * @count: string count
+ *
+ * Adds a string list property to the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_string_list(
+ struct of_changeset *ocs, struct device_node *np, const char *name,
+ const char **strs, int count)
+{
+ return __of_changeset_add_update_property_string_list(ocs, np, name,
+ strs, count, false);
+}
+
+/**
+ * of_changeset_update_property_string_list - Update string list property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @strs: pointer to the string list
+ * @count: string count
+ *
+ * Updates a string list property in the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_string_list(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char **strs, int count)
+{
+ return __of_changeset_add_update_property_string_list(ocs, np, name,
+ strs, count, true);
+}
+
+/**
+ * of_changeset_add_property_string - Adds a string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @str: string property
+ *
+ * Adds a string property to the changeset by making copies of the name
+ * and the string value.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_string(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *str)
+{
+ return __of_changeset_add_update_property_string(ocs, np, name, str,
+ false);
+}
+
+/**
+ * of_changeset_update_property_string - Update a string property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @str: string property
+ *
+ * Updates a string property in the changeset by making copies of the name
+ * and the string value.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_string(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, const char *str)
+{
+ return __of_changeset_add_update_property_string(ocs, np, name, str,
+ true);
+}
+
+/**
+ * of_changeset_add_property_u32 - Create a new u32 property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @val: value in host endian format
+ *
+ * Adds a u32 property to the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_u32(struct of_changeset *ocs,
+ struct device_node *np, const char *name, u32 val)
+{
+ val = cpu_to_be32(val);
+ return __of_changeset_add_update_property_copy(ocs, np, name, &val,
+ sizeof(val), false);
+}
+
+/**
+ * of_changeset_update_property_u32 - Update u32 property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ * @val: value in host endian format
+ *
+ * Updates a u32 property in the changeset.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_u32(
+ struct of_changeset *ocs, struct device_node *np,
+ const char *name, u32 val)
+{
+ val = cpu_to_be32(val);
+ return __of_changeset_add_update_property_copy(ocs, np, name, &val,
+ sizeof(val), true);
+}
+
+/**
+ * of_changeset_add_property_bool - Create a new bool property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ *
+ * Adds a bool property to the changeset. Note that there is
+ * no way to set the value to false: the presence of the property
+ * makes it true.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_add_property_bool(
+ struct of_changeset *ocs, struct device_node *np, const char *name)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, "", 0,
+ false);
+}
+
+/**
+ * of_changeset_update_property_bool - Update a bool property
+ *
+ * @ocs: changeset pointer
+ * @np: device node pointer
+ * @name: name of the property
+ *
+ * Updates a bool property in the changeset. Note that there is
+ * no way to set the value to false: the presence of the property
+ * makes it true.
+ *
+ * Returns zero on success, a negative error value otherwise.
+ */
+static inline int of_changeset_update_property_bool(struct of_changeset *ocs,
+ struct device_node *np, const char *name)
+{
+ return __of_changeset_add_update_property_copy(ocs, np, name, "", 0,
+ true);
+}
+#endif
+
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
/**
* of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
* @np: Pointer to the given device_node
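The changeset helpers compose into the usual init/apply transaction. A minimal sketch under CONFIG_OF_DYNAMIC; the node name, property names, and values are hypothetical:

#include <linux/err.h>
#include <linux/of.h>

/* Hypothetical fixup: create a child node and attach a couple of
 * properties through a single changeset transaction. */
static int example_apply_fixup(struct device_node *parent)
{
	struct of_changeset ocs;
	struct device_node *np;
	int ret;

	of_changeset_init(&ocs);

	np = of_changeset_create_device_node(&ocs, parent, "demo@%x", 0x1000);
	if (IS_ERR(np)) {
		of_changeset_destroy(&ocs);
		return PTR_ERR(np);
	}

	ret = of_changeset_add_property_string(&ocs, np, "compatible",
					       "acme,demo");
	if (!ret)
		ret = of_changeset_add_property_u32(&ocs, np, "reg", 0x1000);
	if (!ret)
		ret = of_changeset_apply(&ocs);
	if (ret)
		of_changeset_destroy(&ocs);

	return ret;
}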
diff --git a/include/linux/phy/phy-zynqmp.h b/include/linux/phy/phy-zynqmp.h
new file mode 100644
index 000000000000..0a25fa85527e
--- /dev/null
+++ b/include/linux/phy/phy-zynqmp.h
@@ -0,0 +1,60 @@
+/*
+ * Xilinx ZynqMP PHY header
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PHY_ZYNQMP_H_
+#define _PHY_ZYNQMP_H_
+
+struct phy;
+
+#if defined(CONFIG_PHY_XILINX_ZYNQMP)
+
+extern int xpsgtr_override_deemph(struct phy *phy, u8 plvl, u8 vlvl);
+extern int xpsgtr_margining_factor(struct phy *phy, u8 plvl, u8 vlvl);
+extern int xpsgtr_wait_pll_lock(struct phy *phy);
+int xpsgtr_usb_crst_assert(struct phy *phy);
+int xpsgtr_usb_crst_release(struct phy *phy);
+#else
+
+static inline int xpsgtr_override_deemph(struct phy *base, u8 plvl, u8 vlvl)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_margining_factor(struct phy *base, u8 plvl, u8 vlvl)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_usb_crst_assert(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_usb_crst_release(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* _PHY_ZYNQMP_H_ */
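A sketch of a plausible call order for a USB consumer of these GT hooks: assert the core reset, wait for PLL lock, then release. The sequencing is an assumption drawn from the function names, not a documented contract:

#include <linux/phy/phy.h>
#include <linux/phy/phy-zynqmp.h>

/* Hypothetical USB glue fragment. */
static int example_usb_phy_ready(struct phy *phy)
{
	int ret;

	ret = xpsgtr_usb_crst_assert(phy);
	if (ret)
		return ret;

	ret = xpsgtr_wait_pll_lock(phy);
	if (ret)
		return ret;

	return xpsgtr_usb_crst_release(phy);
}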
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 04d04709f2bd..2c911b7f646d 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -344,6 +344,8 @@ struct firmware;
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
+ * @peek_remote_kick: check if remote has kicked
+ * @ack_remote_kick: ack remote kick
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
* @find_loaded_rsc_table: find the loaded resouce table
@@ -356,6 +358,8 @@ struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
+ bool (*peek_remote_kick)(struct rproc *rproc, char *buf, size_t *len);
+ void (*ack_remote_kick)(struct rproc *rproc);
void * (*da_to_va)(struct rproc *rproc, u64 da, int len);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
struct resource_table *(*find_loaded_rsc_table)(
@@ -461,6 +465,7 @@ struct rproc_dump_segment {
* @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
+ * @sysfs_kick: allow kick remoteproc from sysfs
*/
struct rproc {
struct list_head node;
@@ -494,6 +499,7 @@ struct rproc {
bool auto_boot;
struct list_head dump_segments;
int nb_vdev;
+ int sysfs_kick;
};
/**
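How a platform driver might wire the two new hooks: peek reports whether the remote has kicked (optionally returning a payload), and ack clears the condition. Everything below, including the private struct and the ops table, is a hypothetical sketch:

#include <linux/remoteproc.h>

struct demo_rproc {
	bool kick_pending;	/* set from the mailbox/IPI interrupt */
};

static bool demo_peek_remote_kick(struct rproc *rproc, char *buf, size_t *len)
{
	struct demo_rproc *priv = rproc->priv;

	if (!priv->kick_pending)
		return false;

	*len = 0;	/* no payload in this sketch */
	return true;
}

static void demo_ack_remote_kick(struct rproc *rproc)
{
	struct demo_rproc *priv = rproc->priv;

	priv->kick_pending = false;
}

static const struct rproc_ops demo_rproc_ops = {
	.peek_remote_kick = demo_peek_remote_kick,
	.ack_remote_kick = demo_ack_remote_kick,
};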
diff --git a/include/linux/soc/xilinx/zynqmp/fw.h b/include/linux/soc/xilinx/zynqmp/fw.h
new file mode 100644
index 000000000000..98165fa2d1ca
--- /dev/null
+++ b/include/linux/soc/xilinx/zynqmp/fw.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+#ifndef __SOC_ZYNQMP_FW_H__
+#define __SOC_ZYNQMP_FW_H__
+
+#include <linux/nvmem-consumer.h>
+
+enum {
+ ZYNQMP_SILICON_V1 = 0,
+ ZYNQMP_SILICON_V2,
+ ZYNQMP_SILICON_V3,
+ ZYNQMP_SILICON_V4,
+};
+
+static inline char *zynqmp_nvmem_get_silicon_version(struct device *dev,
+ const char *cname)
+{
+ struct nvmem_cell *cell;
+ ssize_t data;
+ char *ret;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell))
+ return ERR_CAST(cell);
+
+ ret = nvmem_cell_read(cell, &data);
+ nvmem_cell_put(cell);
+
+ return ret;
+}
+
+#endif /* __SOC_ZYNQMP_FW_H__ */
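A probe-time sketch of the helper above; the nvmem cell name "soc_revision" is an assumption, and error handling is kept minimal:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/soc/xilinx/zynqmp/fw.h>

static int example_check_silicon(struct device *dev)
{
	char *soc_rev;

	soc_rev = zynqmp_nvmem_get_silicon_version(dev, "soc_revision");
	if (IS_ERR(soc_rev))
		return PTR_ERR(soc_rev);

	if (*soc_rev == ZYNQMP_SILICON_V1)
		dev_info(dev, "v1 silicon, applying workarounds\n");

	kfree(soc_rev);	/* nvmem_cell_read() returns a kmalloc'd buffer */
	return 0;
}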
diff --git a/include/linux/soc/xilinx/zynqmp/tap_delays.h b/include/linux/soc/xilinx/zynqmp/tap_delays.h
new file mode 100644
index 000000000000..5f2ef35c0d8e
--- /dev/null
+++ b/include/linux/soc/xilinx/zynqmp/tap_delays.h
@@ -0,0 +1,32 @@
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ * Copyright (C) 2016 - 2018, Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#ifdef CONFIG_ARCH_ZYNQMP
+/* API for programming the tap delays */
+void arasan_zynqmp_set_tap_delay(u8 deviceid, u8 itap_delay, u8 otap_delay);
+
+/* API to reset the DLL */
+void zynqmp_dll_reset(u8 deviceid);
+#else
+static inline void arasan_zynqmp_set_tap_delay(u8 deviceid, u8 itap_delay,
+ u8 otap_delay) {}
+static inline void zynqmp_dll_reset(u8 deviceid) {}
+#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 053abd22ad31..1f8be03e0560 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -464,6 +464,20 @@ struct spi_controller {
#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+ /* Controller may support the data stripe feature when more than
+ * one chip is present.
+ * Setting data stripe will send data in the following manner:
+ * -> even bytes i.e. 0, 2, 4,... are transmitted on lower data bus
+ * -> odd bytes i.e. 1, 3, 5,.. are transmitted on upper data bus
+ */
+#define SPI_MASTER_QUAD_MODE BIT(6) /* support quad mode */
+#define SPI_MASTER_DATA_STRIPE BIT(7) /* support data stripe */
+ /* Controller may support asserting more than one chip select at once.
+ * This flag will enable that feature.
+ */
+#define SPI_MASTER_BOTH_CS BIT(8) /* assert both chip selects */
+#define SPI_MASTER_U_PAGE BIT(9) /* select upper flash */
+
/* flag indicating this is an SPI slave controller */
bool slave;
@@ -732,6 +746,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
+ * @dummy: number of dummy cycles.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
* @cs_change: affects chipselect after this transfer completes
@@ -745,6 +760,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @stripe: true to enable data striping, false to disable it.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -826,7 +842,8 @@ struct spi_transfer {
u16 delay_usecs;
u32 speed_hz;
u16 word_delay;
-
+ u32 dummy;
+ bool stripe;
struct list_head transfer_list;
};
@@ -1278,7 +1295,6 @@ struct spi_board_info {
/* slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
-
/* bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
@@ -1358,6 +1374,9 @@ of_find_spi_device_by_node(struct device_node *node)
#endif /* IS_ENABLED(CONFIG_OF) */
+bool
+update_stripe(const u8 opcode);
+
/* Compatibility layer */
#define spi_master spi_controller
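A client-side sketch of the two new spi_transfer fields; whether a given controller honours @dummy and @stripe depends on its flags, so treat this as illustrative:

#include <linux/spi/spi.h>

/* Hypothetical flash read using 8 dummy cycles and data striping. */
static int example_striped_read(struct spi_device *spi, u8 *rx, size_t len)
{
	struct spi_transfer xfer = {
		.rx_buf = rx,
		.len = len,
		.dummy = 8,	/* dummy cycles before the data phase */
		.stripe = true,	/* even bytes lower bus, odd bytes upper */
	};

	return spi_sync_transfer(spi, &xfer, 1);
}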
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..5661a4459dda 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -61,7 +61,8 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
-#define CI_HDRC_PMQOS BIT(15)
+#define CI_HDRC_PHY_VBUS_CONTROL BIT(15)
+#define CI_HDRC_PMQOS BIT(16)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
new file mode 100644
index 000000000000..af8043181395
--- /dev/null
+++ b/include/linux/usb/xhci_pdriver.h
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __USB_CORE_XHCI_PDRIVER_H
+#define __USB_CORE_XHCI_PDRIVER_H
+
+/* Call dwc3_host_wakeup_capable() only for dwc3 DRD mode or HOST only mode */
+#if (IS_REACHABLE(CONFIG_USB_DWC3_HOST) || \
+ (IS_REACHABLE(CONFIG_USB_DWC3_OF_SIMPLE) && \
+ !IS_REACHABLE(CONFIG_USB_DWC3_GADGET)))
+
+ /* Let the dwc3 driver know about device wakeup capability */
+void dwc3_host_wakeup_capable(struct device *dev, bool wakeup);
+
+#else
+static inline void dwc3_host_wakeup_capable(struct device *dev, bool wakeup)
+{ }
+#endif
+
+#endif /* __USB_CORE_XHCI_PDRIVER_H */
diff --git a/include/linux/xilinx_phy.h b/include/linux/xilinx_phy.h
new file mode 100644
index 000000000000..34a048f7dbe6
--- /dev/null
+++ b/include/linux/xilinx_phy.h
@@ -0,0 +1,20 @@
+#ifndef _XILINX_PHY_H
+#define _XILINX_PHY_H
+
+/* Mask used for ID comparisons */
+#define XILINX_PHY_ID_MASK 0xfffffff0
+
+/* Known PHY IDs */
+#define XILINX_PHY_ID 0x01740c00
+
+/* struct phy_device dev_flags definitions */
+#define XAE_PHY_TYPE_MII 0
+#define XAE_PHY_TYPE_GMII 1
+#define XAE_PHY_TYPE_RGMII_1_3 2
+#define XAE_PHY_TYPE_RGMII_2_0 3
+#define XAE_PHY_TYPE_SGMII 4
+#define XAE_PHY_TYPE_1000BASE_X 5
+#define XAE_PHY_TYPE_2500 6
+#define XXE_PHY_TYPE_USXGMII 7
+
+#endif /* _XILINX_PHY_H */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 8cb2c504a05c..96310de47a25 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -205,6 +205,9 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_route: Return whether a route exists inside the entity between
+ * two given pads. Optional. If the operation isn't
+ * implemented, all pads are considered connected.
*
* .. note::
*
@@ -217,6 +220,8 @@ struct media_entity_operations {
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_route)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
/**
@@ -879,6 +884,9 @@ int media_entity_get_fwnode_pad(struct media_entity *entity,
struct fwnode_handle *fwnode,
unsigned long direction_flags);
+bool media_entity_has_route(struct media_entity *entity, unsigned int sink,
+ unsigned int source);
+
/**
* media_graph_walk_init - Allocate resources used by graph walk.
*
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 7168311e8ecc..ce025f06c2ef 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -710,6 +710,10 @@ struct v4l2_subdev_pad_ops {
struct v4l2_mbus_frame_desc *fd);
int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_frame_desc *fd);
+ int (*get_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_routing *route);
+ int (*set_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_routing *route);
};
/**
diff --git a/include/soc/xilinx/xlnx_vcu.h b/include/soc/xilinx/xlnx_vcu.h
new file mode 100644
index 000000000000..ff03ede993ed
--- /dev/null
+++ b/include/soc/xilinx/xlnx_vcu.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _XILINX_VCU_H_
+#define _XILINX_VCU_H_
+
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @reset_gpio: vcu reset gpio
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ struct gpio_desc *reset_gpio;
+ void __iomem *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+};
+
+u32 xvcu_get_color_depth(struct xvcu_device *xvcu);
+u32 xvcu_get_memory_depth(struct xvcu_device *xvcu);
+u32 xvcu_get_clock_frequency(struct xvcu_device *xvcu);
+u32 xvcu_get_num_cores(struct xvcu_device *xvcu);
+
+#endif /* _XILINX_VCU_H_ */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3feeaa3f987a..3193c3c3f5c0 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -145,6 +145,14 @@ extern "C" {
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
/*
+ * 2 plane 10 bit per component YCbCr
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cb:Cr plane, [63:0] x:Cb2:Cr2:Cb1:x:Cr1:Cb0:Cr0 2:10:10:10:2:10:10:10 little endian
+ */
+#define DRM_FORMAT_XV15 fourcc_code('X', 'V', '1', '5') /* 2x2 subsampled Cb:Cr plane 2:10:10:10 */
+#define DRM_FORMAT_XV20 fourcc_code('X', 'V', '2', '0') /* 2x1 subsampled Cb:Cr plane 2:10:10:10 */
+
+/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
@@ -209,6 +217,13 @@ extern "C" {
#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
+#define DRM_FORMAT_AVUY fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', '2', '4') /* [31:0] x:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] x:Cr:Cb:Y 2:10:10:10 little endian */
+
+/* Grey scale */
+#define DRM_FORMAT_Y8 fourcc_code('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define DRM_FORMAT_Y10 fourcc_code('Y', '1', '0', ' ') /* 10 Greyscale */
/*
* 2 plane RGB + A
* index 0 = RGB plane, same format as the corresponding non _A8 format has
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 83cd1636b9be..b35aebf3f8d1 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -475,6 +475,8 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_ALTERNATE_TOP (1<<2) /* for alternate top field */
+#define DRM_MODE_FB_ALTERNATE_BOTTOM (1<<3) /* for alternate bottom field */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index bc2bcdec377b..aa31b18ebf0a 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -35,6 +35,7 @@ struct af_alg_iv {
#define ALG_SET_OP 3
#define ALG_SET_AEAD_ASSOCLEN 4
#define ALG_SET_AEAD_AUTHSIZE 5
+#define ALG_SET_KEY_TYPE 6
/* Operations */
#define ALG_OP_DECRYPT 0
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 2a6b253cfb05..7e75f4d8319b 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101c */
+/* RGB - next is 0x101e */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -62,8 +62,11 @@
#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
+#define MEDIA_BUS_FMT_RBG101010_1X30 0x101b
+#define MEDIA_BUS_FMT_RBG121212_1X36 0x101c
+#define MEDIA_BUS_FMT_RBG161616_1X48 0x101d
-/* YUV (including grey) - next is 0x202d */
+/* YUV (including grey) - next is 0x2035 */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -80,11 +83,13 @@
#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
+#define MEDIA_BUS_FMT_VYYUYY10_4X20 0x2031
#define MEDIA_BUS_FMT_Y12_1X12 0x2013
#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_VUY12_1X36 0x2033
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
@@ -102,12 +107,19 @@
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_VUY10_1X30 0x2032
#define MEDIA_BUS_FMT_UYYVYY10_0_5X30 0x2027
#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
#define MEDIA_BUS_FMT_UYYVYY12_0_5X36 0x2028
#define MEDIA_BUS_FMT_YUV12_1X36 0x2029
#define MEDIA_BUS_FMT_YUV16_1X48 0x202a
+#define MEDIA_BUS_FMT_VUY16_1X48 0x2034
#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b
+#define MEDIA_BUS_FMT_VYYUYY8_1X24 0x202c
+#define MEDIA_BUS_FMT_Y16_1X16 0x202d
+#define MEDIA_BUS_FMT_UYYVYY12_4X24 0x202e
+#define MEDIA_BUS_FMT_UYYVYY16_4X32 0x202f
+#define MEDIA_BUS_FMT_UYVY16_2X32 0x2030
/* Bayer - next is 0x3021 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
diff --git a/include/uapi/linux/uio/uio.h b/include/uapi/linux/uio/uio.h
new file mode 100644
index 000000000000..db92d311c85f
--- /dev/null
+++ b/include/uapi/linux/uio/uio.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * The header for UIO driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#ifndef _UAPI_UIO_UIO_H_
+#define _UAPI_UIO_UIO_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum uio_dmabuf_dir - list of dma directions for mapping management
+ * @UIO_DMABUF_DIR_BIDIR: Bidirectional DMA. To and from device
+ * @UIO_DMABUF_DIR_TO_DEV: DMA to device
+ * @UIO_DMABUF_DIR_FROM_DEV: DMA from device
+ * @UIO_DMABUF_DIR_NONE: Direction not specified
+ */
+enum uio_dmabuf_dir {
+ UIO_DMABUF_DIR_BIDIR = 1,
+ UIO_DMABUF_DIR_TO_DEV = 2,
+ UIO_DMABUF_DIR_FROM_DEV = 3,
+ UIO_DMABUF_DIR_NONE = 4,
+};
+
+/**
+ * struct uio_dmabuf_args - arguments from userspace to map / unmap dmabuf
+ * @dbuf_fd: The fd of the dma buf
+ * @dma_addr: The dma address of dmabuf @dbuf_fd
+ * @size: The size of dmabuf @dbuf_fd
+ * @dir: direction of dma transfer of dmabuf @dbuf_fd
+ */
+struct uio_dmabuf_args {
+ __s32 dbuf_fd;
+ __u64 dma_addr;
+ __u64 size;
+ __u8 dir;
+};
+
+#define UIO_IOC_BASE 'U'
+
+/**
+ * DOC: UIO_IOC_MAP_DMABUF - Map the dma buf to userspace uio application
+ *
+ * This takes uio_dmabuf_args, and maps the given dmabuf @dbuf_fd and returns
+ * information to userspace.
+ * FIXME: This is experimental and may change at any time. Don't consider this
+ * a stable ABI.
+ */
+#define UIO_IOC_MAP_DMABUF _IOWR(UIO_IOC_BASE, 0x1, struct uio_dmabuf_args)
+
+/**
+ * DOC: UIO_IOC_UNMAP_DMABUF - Unmap the dma buf
+ *
+ * This takes uio_dmabuf_args, and unmaps the previous mapped dmabuf @dbuf_fd.
+ * FIXME: This is experimental and may change at any time. Don't consider this
+ * a stable ABI.
+ */
+#define UIO_IOC_UNMAP_DMABUF _IOWR(UIO_IOC_BASE, 0x2, struct uio_dmabuf_args)
+
+#endif
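Userspace usage of the map ioctl is straightforward; a hedged fragment with the fd plumbing assumed and error handling kept to the ioctl return:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/uio/uio.h>

/* Map a dmabuf through the UIO device and recover its DMA address. */
static uint64_t example_map_dmabuf(int uio_fd, int dbuf_fd, uint64_t size)
{
	struct uio_dmabuf_args args = {
		.dbuf_fd = dbuf_fd,
		.size = size,
		.dir = UIO_DMABUF_DIR_BIDIR,
	};

	if (ioctl(uio_fd, UIO_IOC_MAP_DMABUF, &args) < 0)
		return 0;

	return args.dma_addr;	/* filled in by the driver */
}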
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 123a231001a8..325c985ed06f 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -68,6 +68,8 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RBG888_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X32_PADHI),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8),
@@ -104,6 +106,7 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VUY8_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8),
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 03970ce30741..b76f9b4afe05 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -155,6 +155,27 @@ struct v4l2_subdev_selection {
__u32 reserved[8];
};
+
+/**
+ * struct v4l2_subdev_route - A signal route inside a subdev
+ * @sink: the sink pad
+ * @source: the source pad
+ */
+struct v4l2_subdev_route {
+ __u32 sink;
+ __u32 source;
+};
+
+/**
+ * struct v4l2_subdev_routing - Routing information
+ * @num_routes: the total number of routes in the routes array
+ * @routes: the routes array
+ */
+struct v4l2_subdev_routing {
+ __u32 num_routes;
+ struct v4l2_subdev_route *routes;
+};
+
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
@@ -181,5 +202,7 @@ struct v4l2_subdev_selection {
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_SUBDEV_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
+#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
#endif
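A userspace sketch of the G_ROUTING call: the caller supplies the routes array and its capacity in @num_routes, and, by assumption mirroring similar V4L2 array ioctls, the driver rewrites @num_routes with the actual count:

#include <sys/ioctl.h>
#include <linux/v4l2-subdev.h>

static int example_get_routes(int fd, struct v4l2_subdev_route *routes,
			      unsigned int max_routes)
{
	struct v4l2_subdev_routing routing = {
		.num_routes = max_routes,
		.routes = routes,
	};

	if (ioctl(fd, VIDIOC_SUBDEV_G_ROUTING, &routing) < 0)
		return -1;

	return routing.num_routes;
}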
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index dcd776e77442..b7f1bcaf37ec 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -546,20 +546,25 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */
#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */
-#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('R', 'A', '2', '4') /* 32 ABGR-8-8-8-8 */
-#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('R', 'X', '2', '4') /* 32 XBGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4') /* 32 RGBA-8-8-8-8 */
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
+#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('A', 'B', 'G', 'R') /* 32 ABGR-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('X', 'B', 'G', 'R') /* 32 XBGR-8-8-8-8 */
+#define V4L2_PIX_FMT_XBGR30 v4l2_fourcc('R', 'X', '3', '0') /* 32 XBGR-2-10-10-10 */
+#define V4L2_PIX_FMT_XBGR40 v4l2_fourcc('R', 'X', '4', '0') /* 40 XBGR-4-12-12-12 */
+#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('R', 'G', '4', '8') /* 32 BGR-16-16-16 */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
+#define V4L2_PIX_FMT_XY10 v4l2_fourcc('X', 'Y', '1', '0') /* 10 Greyscale 2-10-10-10 */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_XY12 v4l2_fourcc('X', 'Y', '1', '2') /* 12 Greyscale 4-12-12-12 */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
@@ -581,6 +586,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
+#define V4L2_PIX_FMT_XVUY32 v4l2_fourcc('X', 'V', '3', '2') /* 32 XVUY 8:8:8:8 */
+#define V4L2_PIX_FMT_AVUY32 v4l2_fourcc('A', 'V', '3', '2') /* 32 AVUY 8:8:8:8 */
+#define V4L2_PIX_FMT_VUY24 v4l2_fourcc('V', 'U', '2', '4') /* 24 VUY 8:8:8 */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
@@ -591,6 +599,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+#define V4L2_PIX_FMT_XVUY10 v4l2_fourcc('X', '4', '1', '0') /* 32 XVUY 2-10-10-10 */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -599,6 +608,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+#define V4L2_PIX_FMT_XV20 v4l2_fourcc('X', 'V', '2', '0') /* 32 XY/UV 4:2:2 10-bit */
+#define V4L2_PIX_FMT_XV15 v4l2_fourcc('X', 'V', '1', '5') /* 32 XY/UV 4:2:0 10-bit */
+#define V4L2_PIX_FMT_X012 v4l2_fourcc('X', '0', '1', '2') /* 40 XY/UV 4:2:0 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X212 v4l2_fourcc('X', '2', '1', '2') /* 40 XY/UV 4:2:2 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X412 v4l2_fourcc('X', '4', '1', '2') /* 40 XY/UV 4:4:4 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X016 v4l2_fourcc('X', '0', '1', '6') /* 32 XY/UV 4:2:0 16-bit */
+#define V4L2_PIX_FMT_X216 v4l2_fourcc('X', '2', '1', '6') /* 32 XY/UV 4:2:2 16-bit */
+#define V4L2_PIX_FMT_X416 v4l2_fourcc('X', '4', '1', '6') /* 32 XY/UV 4:4:4 16-bit */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -606,6 +623,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
+#define V4L2_PIX_FMT_XV20M v4l2_fourcc('X', 'M', '2', '0') /* 32 XY/UV 4:2:2 10-bit */
+#define V4L2_PIX_FMT_XV15M v4l2_fourcc('X', 'M', '1', '5') /* 32 XY/UV 4:2:0 10-bit */
+#define V4L2_PIX_FMT_X012M v4l2_fourcc('M', '0', '1', '2') /* 40 XY/UV 4:2:0 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X212M v4l2_fourcc('M', '2', '1', '2') /* 40 XY/UV 4:2:2 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X412M v4l2_fourcc('M', '4', '1', '2') /* 40 XY/UV 4:4:4 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X016M v4l2_fourcc('M', '0', '1', '6') /* 32 XY/UV 4:2:0 16-bit */
+#define V4L2_PIX_FMT_X216M v4l2_fourcc('M', '2', '1', '6') /* 32 XY/UV 4:2:2 16-bit */
+#define V4L2_PIX_FMT_X416M v4l2_fourcc('M', '4', '1', '6') /* 32 XY/UV 4:4:4 16-bit */
#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
/* three planes - Y Cb, Cr */
diff --git a/include/uapi/linux/xilinx-csi2rxss.h b/include/uapi/linux/xilinx-csi2rxss.h
new file mode 100644
index 000000000000..df64ddc5eed4
--- /dev/null
+++ b/include/uapi/linux/xilinx-csi2rxss.h
@@ -0,0 +1,18 @@
+#ifndef __UAPI_XILINX_CSI2RXSS_H__
+#define __UAPI_XILINX_CSI2RXSS_H__
+
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXCSIRX_SPKT: Short packet received
+ * V4L2_EVENT_XLNXCSIRX_SPKT_OVF: Short packet FIFO overflow
+ * V4L2_EVENT_XLNXCSIRX_SLBF: Stream line buffer full
+ */
+#define V4L2_EVENT_XLNXCSIRX_CLASS (V4L2_EVENT_PRIVATE_START | 0x100)
+#define V4L2_EVENT_XLNXCSIRX_SPKT (V4L2_EVENT_XLNXCSIRX_CLASS | 0x1)
+#define V4L2_EVENT_XLNXCSIRX_SPKT_OVF (V4L2_EVENT_XLNXCSIRX_CLASS | 0x2)
+#define V4L2_EVENT_XLNXCSIRX_SLBF (V4L2_EVENT_XLNXCSIRX_CLASS | 0x3)
+
+#endif /* __UAPI_XILINX_CSI2RXSS_H__ */
diff --git a/include/uapi/linux/xilinx-hls.h b/include/uapi/linux/xilinx-hls.h
new file mode 100644
index 000000000000..a7f6447927e0
--- /dev/null
+++ b/include/uapi/linux/xilinx-hls.h
@@ -0,0 +1,21 @@
+#ifndef __UAPI_XILINX_HLS_H__
+#define __UAPI_XILINX_HLS_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+struct xilinx_axi_hls_register {
+ __u32 offset;
+ __u32 value;
+};
+
+struct xilinx_axi_hls_registers {
+ __u32 num_regs;
+ struct xilinx_axi_hls_register __user *regs;
+};
+
+#define XILINX_AXI_HLS_READ _IOWR('V', BASE_VIDIOC_PRIVATE+0, struct xilinx_axi_hls_registers)
+#define XILINX_AXI_HLS_WRITE _IOW('V', BASE_VIDIOC_PRIVATE+1, struct xilinx_axi_hls_registers)
+
+#endif /* __UAPI_XILINX_HLS_H__ */
diff --git a/include/uapi/linux/xilinx-sdirxss.h b/include/uapi/linux/xilinx-sdirxss.h
new file mode 100644
index 000000000000..b7a98041f169
--- /dev/null
+++ b/include/uapi/linux/xilinx-sdirxss.h
@@ -0,0 +1,66 @@
+#ifndef __UAPI_XILINX_SDIRXSS_H__
+#define __UAPI_XILINX_SDIRXSS_H__
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXSDIRX_VIDUNLOCK: Video unlock event
+ * V4L2_EVENT_XLNXSDIRX_UNDERFLOW: Video in to AXI4 Stream core underflowed
+ * V4L2_EVENT_XLNXSDIRX_OVERFLOW: Video in to AXI4 Stream core overflowed
+ */
+#define V4L2_EVENT_XLNXSDIRX_CLASS (V4L2_EVENT_PRIVATE_START | 0x200)
+#define V4L2_EVENT_XLNXSDIRX_VIDUNLOCK (V4L2_EVENT_XLNXSDIRX_CLASS | 0x1)
+#define V4L2_EVENT_XLNXSDIRX_UNDERFLOW (V4L2_EVENT_XLNXSDIRX_CLASS | 0x2)
+#define V4L2_EVENT_XLNXSDIRX_OVERFLOW (V4L2_EVENT_XLNXSDIRX_CLASS | 0x3)
+
+/*
+ * This enum is used to prepare the bitmask
+ * of modes to be detected
+ */
+enum {
+ XSDIRX_MODE_SD_OFFSET = 0,
+ XSDIRX_MODE_HD_OFFSET,
+ XSDIRX_MODE_3G_OFFSET,
+ XSDIRX_MODE_6G_OFFSET,
+ XSDIRX_MODE_12GI_OFFSET,
+ XSDIRX_MODE_12GF_OFFSET,
+ XSDIRX_MODE_NUM_SUPPORTED,
+};
+
+#define XSDIRX_DETECT_ALL_MODES (BIT(XSDIRX_MODE_SD_OFFSET) | \
+ BIT(XSDIRX_MODE_HD_OFFSET) | \
+ BIT(XSDIRX_MODE_3G_OFFSET) | \
+ BIT(XSDIRX_MODE_6G_OFFSET) | \
+ BIT(XSDIRX_MODE_12GI_OFFSET) | \
+ BIT(XSDIRX_MODE_12GF_OFFSET))
+
+/*
+ * EDH Error Types
+ * ANC - Ancillary Data Packet Errors
+ * FF - Full Field Errors
+ * AP - Active Portion Errors
+ */
+
+#define XSDIRX_EDH_ERRCNT_ANC_EDH_ERR BIT(0)
+#define XSDIRX_EDH_ERRCNT_ANC_EDA_ERR BIT(1)
+#define XSDIRX_EDH_ERRCNT_ANC_IDH_ERR BIT(2)
+#define XSDIRX_EDH_ERRCNT_ANC_IDA_ERR BIT(3)
+#define XSDIRX_EDH_ERRCNT_ANC_UES_ERR BIT(4)
+#define XSDIRX_EDH_ERRCNT_FF_EDH_ERR BIT(5)
+#define XSDIRX_EDH_ERRCNT_FF_EDA_ERR BIT(6)
+#define XSDIRX_EDH_ERRCNT_FF_IDH_ERR BIT(7)
+#define XSDIRX_EDH_ERRCNT_FF_IDA_ERR BIT(8)
+#define XSDIRX_EDH_ERRCNT_FF_UES_ERR BIT(9)
+#define XSDIRX_EDH_ERRCNT_AP_EDH_ERR BIT(10)
+#define XSDIRX_EDH_ERRCNT_AP_EDA_ERR BIT(11)
+#define XSDIRX_EDH_ERRCNT_AP_IDH_ERR BIT(12)
+#define XSDIRX_EDH_ERRCNT_AP_IDA_ERR BIT(13)
+#define XSDIRX_EDH_ERRCNT_AP_UES_ERR BIT(14)
+#define XSDIRX_EDH_ERRCNT_PKT_CHKSUM_ERR BIT(15)
+
+#define XSDIRX_EDH_ALLERR_MASK 0xFFFF
+
+#endif /* __UAPI_XILINX_SDIRXSS_H__ */
diff --git a/include/uapi/linux/xilinx-v4l2-controls.h b/include/uapi/linux/xilinx-v4l2-controls.h
index b6441fe705c5..61a02a326515 100644
--- a/include/uapi/linux/xilinx-v4l2-controls.h
+++ b/include/uapi/linux/xilinx-v4l2-controls.h
@@ -70,5 +70,146 @@
#define V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH (V4L2_CID_XILINX_TPG + 16)
/* Noise level */
#define V4L2_CID_XILINX_TPG_NOISE_GAIN (V4L2_CID_XILINX_TPG + 17)
+/* Foreground pattern (HLS)*/
+#define V4L2_CID_XILINX_TPG_HLS_FG_PATTERN (V4L2_CID_XILINX_TPG + 18)
+/*
+ * Xilinx CRESAMPLE Video IP
+ */
+
+#define V4L2_CID_XILINX_CRESAMPLE (V4L2_CID_USER_BASE + 0xc020)
+
+/* The field parity for interlaced video */
+#define V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY (V4L2_CID_XILINX_CRESAMPLE + 1)
+/* Specify if the first line of video contains the Chroma information */
+#define V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY (V4L2_CID_XILINX_CRESAMPLE + 2)
+
+/*
+ * Xilinx RGB2YUV Video IPs
+ */
+
+#define V4L2_CID_XILINX_RGB2YUV (V4L2_CID_USER_BASE + 0xc040)
+
+/* Maximum Luma(Y) value */
+#define V4L2_CID_XILINX_RGB2YUV_YMAX (V4L2_CID_XILINX_RGB2YUV + 1)
+/* Minimum Luma(Y) value */
+#define V4L2_CID_XILINX_RGB2YUV_YMIN (V4L2_CID_XILINX_RGB2YUV + 2)
+/* Maximum Cb Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CBMAX (V4L2_CID_XILINX_RGB2YUV + 3)
+/* Minimum Cb Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CBMIN (V4L2_CID_XILINX_RGB2YUV + 4)
+/* Maximum Cr Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CRMAX (V4L2_CID_XILINX_RGB2YUV + 5)
+/* Minimum Cr Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CRMIN (V4L2_CID_XILINX_RGB2YUV + 6)
+/* The offset compensation value for Luma(Y) */
+#define V4L2_CID_XILINX_RGB2YUV_YOFFSET (V4L2_CID_XILINX_RGB2YUV + 7)
+/* The offset compensation value for Cb Chroma */
+#define V4L2_CID_XILINX_RGB2YUV_CBOFFSET (V4L2_CID_XILINX_RGB2YUV + 8)
+/* The offset compensation value for Cr Chroma */
+#define V4L2_CID_XILINX_RGB2YUV_CROFFSET (V4L2_CID_XILINX_RGB2YUV + 9)
+
+/* Y = CA * R + (1 - CA - CB) * G + CB * B */
+
+/* CA coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_ACOEF (V4L2_CID_XILINX_RGB2YUV + 10)
+/* CB coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_BCOEF (V4L2_CID_XILINX_RGB2YUV + 11)
+/* CC coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_CCOEF (V4L2_CID_XILINX_RGB2YUV + 12)
+/* CD coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_DCOEF (V4L2_CID_XILINX_RGB2YUV + 13)
+
+/*
+ * Xilinx HLS Video IP
+ */
+
+#define V4L2_CID_XILINX_HLS (V4L2_CID_USER_BASE + 0xc060)
+
+/* The IP model */
+#define V4L2_CID_XILINX_HLS_MODEL (V4L2_CID_XILINX_HLS + 1)
+
+/*
+ * Xilinx MIPI CSI2 Rx Subsystem
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_MIPICSISS (V4L2_CID_USER_BASE + 0xc080)
+
+/* Active Lanes */
+#define V4L2_CID_XILINX_MIPICSISS_ACT_LANES (V4L2_CID_XILINX_MIPICSISS + 1)
+/* Frames received since streaming is set */
+#define V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER (V4L2_CID_XILINX_MIPICSISS + 2)
+/* Reset all event counters */
+#define V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS (V4L2_CID_XILINX_MIPICSISS + 3)
+
+/*
+ * Xilinx Gamma Correction IP
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_GAMMA_CORR (V4L2_CID_USER_BASE + 0xc0c0)
+/* Adjust Red Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 1)
+/* Adjust Blue Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 2)
+/* Adjust Green Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 3)
+
+/*
+ * Xilinx Color Space Converter (CSC) VPSS
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_CSC (V4L2_CID_USER_BASE + 0xc0a0)
+/* Adjust Brightness */
+#define V4L2_CID_XILINX_CSC_BRIGHTNESS (V4L2_CID_XILINX_CSC + 1)
+/* Adjust Contrast */
+#define V4L2_CID_XILINX_CSC_CONTRAST (V4L2_CID_XILINX_CSC + 2)
+/* Adjust Red Gain */
+#define V4L2_CID_XILINX_CSC_RED_GAIN (V4L2_CID_XILINX_CSC + 3)
+/* Adjust Green Gain */
+#define V4L2_CID_XILINX_CSC_GREEN_GAIN (V4L2_CID_XILINX_CSC + 4)
+/* Adjust Blue Gain */
+#define V4L2_CID_XILINX_CSC_BLUE_GAIN (V4L2_CID_XILINX_CSC + 5)
+
+/*
+ * Xilinx SDI Rx Subsystem
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_SDIRX (V4L2_CID_USER_BASE + 0xc100)
+
+/* Framer Control */
+#define V4L2_CID_XILINX_SDIRX_FRAMER (V4L2_CID_XILINX_SDIRX + 1)
+/* Video Lock Window Control */
+#define V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW (V4L2_CID_XILINX_SDIRX + 2)
+/* EDH Error Mask Control */
+#define V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE (V4L2_CID_XILINX_SDIRX + 3)
+/* Mode search Control */
+#define V4L2_CID_XILINX_SDIRX_SEARCH_MODES (V4L2_CID_XILINX_SDIRX + 4)
+/* Get Detected Mode control */
+#define V4L2_CID_XILINX_SDIRX_MODE_DETECT (V4L2_CID_XILINX_SDIRX + 5)
+/* Get CRC error status */
+#define V4L2_CID_XILINX_SDIRX_CRC (V4L2_CID_XILINX_SDIRX + 6)
+/* Get EDH error count control */
+#define V4L2_CID_XILINX_SDIRX_EDH_ERRCNT (V4L2_CID_XILINX_SDIRX + 7)
+/* Get EDH status control */
+#define V4L2_CID_XILINX_SDIRX_EDH_STATUS (V4L2_CID_XILINX_SDIRX + 8)
+/* Get Transport Interlaced status */
+#define V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED (V4L2_CID_XILINX_SDIRX + 9)
+/* Get Active Streams count */
+#define V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS (V4L2_CID_XILINX_SDIRX + 10)
+/* Is Mode 3GB */
+#define V4L2_CID_XILINX_SDIRX_IS_3GB (V4L2_CID_XILINX_SDIRX + 11)
+
+/*
+ * Xilinx VIP
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_VIP (V4L2_CID_USER_BASE + 0xc120)
+
+/* Low latency mode */
+#define V4L2_CID_XILINX_LOW_LATENCY (V4L2_CID_XILINX_VIP + 1)
#endif /* __UAPI_XILINX_V4L2_CONTROLS_H__ */
diff --git a/include/uapi/linux/xilinx-v4l2-events.h b/include/uapi/linux/xilinx-v4l2-events.h
new file mode 100644
index 000000000000..e31e998eba67
--- /dev/null
+++ b/include/uapi/linux/xilinx-v4l2-events.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx V4L2 SCD Driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ *
+ */
+
+#ifndef __UAPI_XILINX_V4L2_EVENTS_H__
+#define __UAPI_XILINX_V4L2_EVENTS_H__
+
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXSCD: Scene Change Detection
+ */
+#define V4L2_EVENT_XLNXSCD_CLASS (V4L2_EVENT_PRIVATE_START | 0x300)
+#define V4L2_EVENT_XLNXSCD (V4L2_EVENT_XLNXSCD_CLASS | 0x1)
+
+#endif /* __UAPI_XILINX_V4L2_EVENTS_H__ */
diff --git a/include/uapi/linux/xlnx_ctrl.h b/include/uapi/linux/xlnx_ctrl.h
new file mode 100644
index 000000000000..35ff1fdbf65b
--- /dev/null
+++ b/include/uapi/linux/xlnx_ctrl.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Xilinx Controls Header
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Contacts: Saurabh Sengar <saurabh.singh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_XLNX_CTRL_H__
+#define __UAPI_XLNX_CTRL_H__
+
+#define XSET_FB_CAPTURE 16
+#define XSET_FB_CONFIGURE 17
+#define XSET_FB_ENABLE 18
+#define XSET_FB_DISABLE 19
+#define XSET_FB_RELEASE 20
+#define XSET_FB_ENABLE_SNGL 21
+#define XSET_FB_POLL 22
+#define XVPSS_SET_CONFIGURE 16
+#define XVPSS_SET_ENABLE 17
+#define XVPSS_SET_DISABLE 18
+
+#endif /* __UAPI_XLNX_CTRL_H__ */
diff --git a/include/uapi/linux/xlnxsync.h b/include/uapi/linux/xlnxsync.h
new file mode 100644
index 000000000000..989b2f1ef93c
--- /dev/null
+++ b/include/uapi/linux/xlnxsync.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __XLNXSYNC_H__
+#define __XLNXSYNC_H__
+
+/* Bit offset in channel status byte */
+/* x = channel */
+#define XLNXSYNC_CHX_FB0_MASK(x) BIT(0 + ((x) << 3))
+#define XLNXSYNC_CHX_FB1_MASK(x) BIT(1 + ((x) << 3))
+#define XLNXSYNC_CHX_FB2_MASK(x) BIT(2 + ((x) << 3))
+#define XLNXSYNC_CHX_ENB_MASK(x) BIT(3 + ((x) << 3))
+#define XLNXSYNC_CHX_SYNC_ERR_MASK(x) BIT(4 + ((x) << 3))
+#define XLNXSYNC_CHX_WDG_ERR_MASK(x) BIT(5 + ((x) << 3))
+
+/*
+ * This is set in the fb_id or channel_id of struct xlnxsync_chan_config when
+ * configuring the channel. This makes the driver auto search for the free
+ * framebuffer or channel slot.
+ */
+#define XLNXSYNC_AUTO_SEARCH 0xFF
+
+#define XLNXSYNC_MAX_ENC_CHANNEL 4
+#define XLNXSYNC_MAX_DEC_CHANNEL 2
+#define XLNXSYNC_BUF_PER_CHANNEL 3
+
+/**
+ * struct xlnxsync_chan_config - Synchronizer channel configuration struct
+ * @luma_start_address: Start address of Luma buffer
+ * @chroma_start_address: Start address of Chroma buffer
+ * @luma_end_address: End address of Luma buffer
+ * @chroma_end_address: End address of Chroma buffer
+ * @luma_margin: Margin for Luma buffer
+ * @chroma_margin: Margin for Chroma buffer
+ * @fb_id: Framebuffer index. Valid values 0/1/2/XLNXSYNC_AUTO_SEARCH
+ * @channel_id: Channel index to be configured.
+ * Valid 0..3 & XLNXSYNC_AUTO_SEARCH
+ * @ismono: Flag to indicate if buffer is Luma only.
+ *
+ * This structure contains the configuration for monitoring a particular
+ * framebuffer on a particular channel.
+ */
+struct xlnxsync_chan_config {
+ __u64 luma_start_address;
+ __u64 chroma_start_address;
+ __u64 luma_end_address;
+ __u64 chroma_end_address;
+ __u32 luma_margin;
+ __u32 chroma_margin;
+ __u8 fb_id;
+ __u8 channel_id;
+ __u8 ismono;
+};
+
+/**
+ * struct xlnxsync_clr_err - Clear channel error
+ * @channel_id: Channel id whose error needs to be cleared
+ * @sync_err: Set this to clear sync error
+ * @wdg_err: Set this to clear watchdog error
+ */
+struct xlnxsync_clr_err {
+ __u8 channel_id;
+ __u8 sync_err;
+ __u8 wdg_err;
+};
+
+/**
+ * struct xlnxsync_fbdone - Framebuffer Done
+ * @status: Framebuffer Done status
+ */
+struct xlnxsync_fbdone {
+ __u8 status[XLNXSYNC_MAX_ENC_CHANNEL][XLNXSYNC_BUF_PER_CHANNEL];
+};
+
+/**
+ * struct xlnxsync_config - Synchronizer IP configuration
+ * @encode: true if encoder type, false for decoder type
+ * @max_channels: Maximum channels this IP supports
+ */
+struct xlnxsync_config {
+ __u8 encode;
+ __u8 max_channels;
+};
+
+#define XLNXSYNC_MAGIC 'X'
+
+/*
+ * This ioctl is used to get the IP config (i.e. encode / decode)
+ * and max number of channels
+ */
+#define XLNXSYNC_GET_CFG _IOR(XLNXSYNC_MAGIC, 1,\
+ struct xlnxsync_config *)
+/* This ioctl is used to get the channel status */
+#define XLNXSYNC_GET_CHAN_STATUS _IOR(XLNXSYNC_MAGIC, 2, u32 *)
+/* This is used to set the framebuffer address for a channel */
+#define XLNXSYNC_SET_CHAN_CONFIG _IOW(XLNXSYNC_MAGIC, 3,\
+ struct xlnxsync_chan_config *)
+/* Enable a channel. The argument is channel number between 0 and 3 */
+#define XLNXSYNC_CHAN_ENABLE _IOR(XLNXSYNC_MAGIC, 4, u8)
+/* Disable a channel. The argument is channel number between 0 and 3 */
+#define XLNXSYNC_CHAN_DISABLE _IOR(XLNXSYNC_MAGIC, 5, u8)
+/* This is used to clear the Sync and Watchdog errors for a channel */
+#define XLNXSYNC_CLR_CHAN_ERR _IOW(XLNXSYNC_MAGIC, 6,\
+ struct xlnxsync_clr_err *)
+/* This is used to get the framebuffer done status for a channel */
+#define XLNXSYNC_GET_CHAN_FBDONE_STAT _IOR(XLNXSYNC_MAGIC, 7,\
+ struct xlnxsync_fbdone *)
+/* This is used to clear the framebuffer done status for a channel */
+#define XLNXSYNC_CLR_CHAN_FBDONE_STAT _IOW(XLNXSYNC_MAGIC, 8,\
+ struct xlnxsync_fbdone *)
+
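+/*
+ * Usage sketch (illustrative only, error handling omitted): configure and
+ * enable one channel from user space. The device node name and the
+ * luma_dma_addr/luma_size values are assumptions made for the example.
+ *
+ *	struct xlnxsync_config ip_cfg;
+ *	struct xlnxsync_chan_config ch = {
+ *		.luma_start_address = luma_dma_addr,
+ *		.luma_end_address = luma_dma_addr + luma_size,
+ *		.fb_id = XLNXSYNC_AUTO_SEARCH,
+ *		.channel_id = XLNXSYNC_AUTO_SEARCH,
+ *	};
+ *	int fd = open("/dev/xlnxsync0", O_RDWR);
+ *
+ *	ioctl(fd, XLNXSYNC_GET_CFG, &ip_cfg);
+ *	ioctl(fd, XLNXSYNC_SET_CHAN_CONFIG, &ch);
+ *	ioctl(fd, XLNXSYNC_CHAN_ENABLE, 0);
+ */
+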
+#endif
diff --git a/include/uapi/linux/zocl_ioctl.h b/include/uapi/linux/zocl_ioctl.h
new file mode 100644
index 000000000000..ee1f1e289cd8
--- /dev/null
+++ b/include/uapi/linux/zocl_ioctl.h
@@ -0,0 +1,125 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XCL_ZOCL_IOCTL_H_
+#define _XCL_ZOCL_IOCTL_H_
+
+enum {
+ DRM_ZOCL_CREATE_BO = 0,
+ DRM_ZOCL_MAP_BO,
+ DRM_ZOCL_SYNC_BO,
+ DRM_ZOCL_INFO_BO,
+ DRM_ZOCL_PWRITE_BO,
+ DRM_ZOCL_PREAD_BO,
+ DRM_ZOCL_NUM_IOCTLS
+};
+
+enum drm_zocl_sync_bo_dir {
+ DRM_ZOCL_SYNC_BO_TO_DEVICE,
+ DRM_ZOCL_SYNC_BO_FROM_DEVICE
+};
+
+#define DRM_ZOCL_BO_FLAGS_COHERENT 0x00000001
+#define DRM_ZOCL_BO_FLAGS_CMA 0x00000002
+
+struct drm_zocl_create_bo {
+ uint64_t size;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct drm_zocl_map_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+};
+
+/**
+ * struct drm_zocl_sync_bo - used for SYNC_BO IOCTL
+ * @handle:	GEM object handle
+ * @dir:	DRM_ZOCL_SYNC_BO_TO_DEVICE or DRM_ZOCL_SYNC_BO_FROM_DEVICE
+ * @offset:	Offset into the object to sync
+ * @size:	Length of data to sync
+ */
+struct drm_zocl_sync_bo {
+ uint32_t handle;
+ enum drm_zocl_sync_bo_dir dir;
+ uint64_t offset;
+ uint64_t size;
+};
+
+/**
+ * struct drm_zocl_info_bo - used for INFO_BO IOCTL
+ * @handle: GEM object handle
+ * @size: Size of BO
+ * @paddr: physical address
+ */
+struct drm_zocl_info_bo {
+ uint32_t handle;
+ uint64_t size;
+ uint64_t paddr;
+};
+
+/**
+ * struct drm_zocl_pwrite_bo - used for PWRITE_BO IOCTL
+ * @handle: GEM object handle
+ * @pad: Padding
+ * @offset: Offset into the object to write to
+ * @size: Length of data to write
+ * @data_ptr: Pointer to read the data from (pointers not 32/64 compatible)
+ */
+struct drm_zocl_pwrite_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t data_ptr;
+};
+
+/**
+ * struct drm_zocl_pread_bo - used for PREAD_BO IOCTL
+ * @handle: GEM object handle
+ * @pad: Padding
+ * @offset: Offset into the object to read from
+ * @size:	Length of data to read
+ * @data_ptr: Pointer to write the data into (pointers not 32/64 compatible)
+ */
+struct drm_zocl_pread_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t data_ptr;
+};
+
+#define DRM_IOCTL_ZOCL_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_CREATE_BO, \
+ struct drm_zocl_create_bo)
+#define DRM_IOCTL_ZOCL_MAP_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_MAP_BO, struct drm_zocl_map_bo)
+#define DRM_IOCTL_ZOCL_SYNC_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_SYNC_BO, struct drm_zocl_sync_bo)
+#define DRM_IOCTL_ZOCL_INFO_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_INFO_BO, struct drm_zocl_info_bo)
+#define DRM_IOCTL_ZOCL_PWRITE_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_PWRITE_BO, \
+ struct drm_zocl_pwrite_bo)
+#define DRM_IOCTL_ZOCL_PREAD_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_PREAD_BO, struct drm_zocl_pread_bo)
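+
+/*
+ * Usage sketch (illustrative only, error handling omitted): a typical
+ * buffer-object lifecycle from user space. The DRM render node path is an
+ * assumption.
+ *
+ *	struct drm_zocl_create_bo create = {
+ *		.size = 4096,
+ *		.flags = DRM_ZOCL_BO_FLAGS_CMA,
+ *	};
+ *	struct drm_zocl_map_bo map = { 0 };
+ *	struct drm_zocl_sync_bo sync = { 0 };
+ *	int fd = open("/dev/dri/renderD128", O_RDWR);
+ *	void *ptr;
+ *
+ *	ioctl(fd, DRM_IOCTL_ZOCL_CREATE_BO, &create);
+ *	map.handle = create.handle;
+ *	ioctl(fd, DRM_IOCTL_ZOCL_MAP_BO, &map);
+ *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   fd, map.offset);
+ *	sync.handle = create.handle;
+ *	sync.dir = DRM_ZOCL_SYNC_BO_TO_DEVICE;
+ *	sync.size = create.size;
+ *	ioctl(fd, DRM_IOCTL_ZOCL_SYNC_BO, &sync);
+ */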
+#endif
diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h
new file mode 100644
index 000000000000..13c4a9f9c360
--- /dev/null
+++ b/include/uapi/misc/xilinx_sdfec.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Xilinx SD-FEC
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for SDFEC16 IP. It provides a char device
+ * in sysfs and supports file operations like open(), close() and ioctl().
+ */
+#ifndef __XILINX_SDFEC_H__
+#define __XILINX_SDFEC_H__
+
+/* Shared LDPC Tables */
+#define XSDFEC_LDPC_SC_TABLE_ADDR_BASE (0x10000)
+#define XSDFEC_LDPC_SC_TABLE_ADDR_HIGH (0x103FC)
+#define XSDFEC_LDPC_LA_TABLE_ADDR_BASE (0x18000)
+#define XSDFEC_LDPC_LA_TABLE_ADDR_HIGH (0x18FFC)
+#define XSDFEC_LDPC_QC_TABLE_ADDR_BASE (0x20000)
+#define XSDFEC_LDPC_QC_TABLE_ADDR_HIGH (0x27FFC)
+
+/**
+ * enum xsdfec_code - Code Type.
+ * @XSDFEC_TURBO_CODE: Driver is configured for Turbo mode.
+ * @XSDFEC_LDPC_CODE: Driver is configured for LDPC mode.
+ *
+ * This enum is used to indicate the mode of the driver. The mode is determined
+ * by checking which codes are set in the driver. Note that the mode cannot be
+ * changed by the driver.
+ */
+enum xsdfec_code {
+ XSDFEC_TURBO_CODE = 0,
+ XSDFEC_LDPC_CODE,
+};
+
+/**
+ * enum xsdfec_order - Order
+ * @XSDFEC_MAINTAIN_ORDER: Maintain order execution of blocks.
+ * @XSDFEC_OUT_OF_ORDER: Out-of-order execution of blocks.
+ *
+ * This enum is used to indicate whether the order of blocks can change from
+ * input to output.
+ */
+enum xsdfec_order {
+ XSDFEC_MAINTAIN_ORDER = 0,
+ XSDFEC_OUT_OF_ORDER,
+};
+
+/**
+ * enum xsdfec_turbo_alg - Turbo Algorithm Type.
+ * @XSDFEC_MAX_SCALE: Max Log-Map algorithm with extrinsic scaling. When
+ *                    the scaling factor is set to 1, this is equivalent to
+ *                    the Max Log-Map algorithm.
+ * @XSDFEC_MAX_STAR: Log-Map algorithm.
+ * @XSDFEC_TURBO_ALG_MAX: Used to indicate out of bound Turbo algorithms.
+ *
+ * This enum specifies which Turbo Decode algorithm is in use.
+ */
+enum xsdfec_turbo_alg {
+ XSDFEC_MAX_SCALE = 0,
+ XSDFEC_MAX_STAR,
+ XSDFEC_TURBO_ALG_MAX,
+};
+
+/**
+ * enum xsdfec_state - State.
+ * @XSDFEC_INIT: Driver is initialized.
+ * @XSDFEC_STARTED: Driver is started.
+ * @XSDFEC_STOPPED: Driver is stopped.
+ * @XSDFEC_NEEDS_RESET: Driver needs to be reset.
+ * @XSDFEC_PL_RECONFIGURE: Programmable Logic needs to be reconfigured.
+ *
+ * This enum is used to indicate the state of the driver.
+ */
+enum xsdfec_state {
+ XSDFEC_INIT = 0,
+ XSDFEC_STARTED,
+ XSDFEC_STOPPED,
+ XSDFEC_NEEDS_RESET,
+ XSDFEC_PL_RECONFIGURE,
+};
+
+/**
+ * enum xsdfec_axis_width - AXIS_WIDTH.DIN Setting for 128-bit width.
+ * @XSDFEC_1x128b: DIN data input stream consists of a 128-bit lane
+ * @XSDFEC_2x128b: DIN data input stream consists of two 128-bit lanes
+ * @XSDFEC_4x128b: DIN data input stream consists of four 128-bit lanes
+ *
+ * This enum is used to indicate the AXIS_WIDTH.DIN setting for 128-bit width.
+ * The number of lanes of the DIN data input stream depends upon the
+ * AXIS_WIDTH.DIN parameter.
+ */
+enum xsdfec_axis_width {
+ XSDFEC_1x128b = 1,
+ XSDFEC_2x128b = 2,
+ XSDFEC_4x128b = 4,
+};
+
+/**
+ * enum xsdfec_axis_word_include - Words Configuration.
+ * @XSDFEC_FIXED_VALUE: Fixed, the DIN_WORDS AXI4-Stream interface is removed
+ * from the IP instance and is driven with the specified
+ * number of words.
+ * @XSDFEC_IN_BLOCK: In Block, configures the IP instance to expect a single
+ * DIN_WORDS value per input code block. The DIN_WORDS
+ * interface is present.
+ * @XSDFEC_PER_AXI_TRANSACTION: Per Transaction, configures the IP instance to
+ * expect one DIN_WORDS value per input transaction on the DIN interface. The
+ * DIN_WORDS interface is present.
+ * @XSDFEC_AXIS_WORDS_INCLUDE_MAX: Used to indicate out of bound Words
+ * Configurations.
+ *
+ * This enum is used to specify the DIN_WORDS configuration.
+ */
+enum xsdfec_axis_word_include {
+ XSDFEC_FIXED_VALUE = 0,
+ XSDFEC_IN_BLOCK,
+ XSDFEC_PER_AXI_TRANSACTION,
+ XSDFEC_AXIS_WORDS_INCLUDE_MAX,
+};
+
+/**
+ * struct xsdfec_turbo - User data for Turbo codes.
+ * @alg: Specifies which Turbo decode algorithm to use
+ * @scale: Specifies the extrinsic scaling to apply when the Max Scale algorithm
+ * has been selected
+ *
+ * Turbo code structure to communicate parameters to XSDFEC driver.
+ */
+struct xsdfec_turbo {
+ enum xsdfec_turbo_alg alg;
+ u8 scale;
+};
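+
+/*
+ * For example (the values are arbitrary), a Max Scale configuration with a
+ * scale factor of 12 could be passed to the XSDFEC_SET_TURBO ioctl defined
+ * later in this header:
+ *
+ *	struct xsdfec_turbo turbo = {
+ *		.alg = XSDFEC_MAX_SCALE,
+ *		.scale = 12,
+ *	};
+ */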
+
+/**
+ * struct xsdfec_ldpc_params - User data for LDPC codes.
+ * @n: Number of code word bits
+ * @k: Number of information bits
+ * @psize: Size of sub-matrix
+ * @nlayers: Number of layers in code
+ * @nqc: Quasi Cyclic Number
+ * @nmqc: Number of M-sized QC operations in parity check matrix
+ * @nm: Number of M-size vectors in N
+ * @norm_type: Normalization required or not
+ * @no_packing: Determines if multiple QC ops should be performed
+ * @special_qc: Sub-Matrix property for Circulant weight > 0
+ * @no_final_parity: Decide if final parity check needs to be performed
+ * @max_schedule: Experimental code word scheduling limit
+ * @sc_off: SC offset
+ * @la_off: LA offset
+ * @qc_off: QC offset
+ * @sc_table: SC Table
+ * @la_table: LA Table
+ * @qc_table: QC Table
+ * @code_id: LDPC Code
+ *
+ * This structure describes the LDPC code that is passed to the driver by the
+ * application.
+ */
+struct xsdfec_ldpc_params {
+ u32 n;
+ u32 k;
+ u32 psize;
+ u32 nlayers;
+ u32 nqc;
+ u32 nmqc;
+ u32 nm;
+ u32 norm_type;
+ u32 no_packing;
+ u32 special_qc;
+ u32 no_final_parity;
+ u32 max_schedule;
+ u32 sc_off;
+ u32 la_off;
+ u32 qc_off;
+ u32 sc_table[XSDFEC_LDPC_SC_TABLE_ADDR_HIGH -
+ XSDFEC_LDPC_SC_TABLE_ADDR_BASE];
+ u32 la_table[XSDFEC_LDPC_LA_TABLE_ADDR_HIGH -
+ XSDFEC_LDPC_LA_TABLE_ADDR_BASE];
+ u32 qc_table[XSDFEC_LDPC_QC_TABLE_ADDR_HIGH -
+ XSDFEC_LDPC_QC_TABLE_ADDR_BASE];
+ u16 code_id;
+};
+
+/**
+ * struct xsdfec_status - Status of SD-FEC core.
+ * @fec_id: ID of SD-FEC instance. ID is limited to the number of active
+ *          SD-FECs in the FPGA and is related to the driver instance
+ * Minor number.
+ * @state: State of the SD-FEC core
+ * @activity: Describes if the SD-FEC instance is Active
+ */
+struct xsdfec_status {
+ s32 fec_id;
+ enum xsdfec_state state;
+ bool activity;
+};
+
+/**
+ * struct xsdfec_irq - Enabling or Disabling Interrupts.
+ * @enable_isr: If true enables the ISR
+ * @enable_ecc_isr: If true enables the ECC ISR
+ */
+struct xsdfec_irq {
+ bool enable_isr;
+ bool enable_ecc_isr;
+};
+
+/**
+ * struct xsdfec_config - Configuration of SD-FEC core.
+ * @fec_id: ID of SD-FEC instance. ID is limited to the number of active
+ *          SD-FECs in the FPGA and is related to the driver instance
+ * Minor number.
+ * @code: The codes being used by the SD-FEC instance
+ * @order: Order of Operation
+ * @bypass: Is the core being bypassed
+ * @code_wr_protect: Is write protection of LDPC codes enabled
+ * @din_width: Width of the DIN AXI4-Stream
+ * @din_word_include: How DIN_WORDS are input
+ * @dout_width: Width of the DOUT AXI4-Stream
+ * @dout_word_include: How DOUT_WORDS are output
+ * @irq: Enabling or disabling interrupts
+ */
+struct xsdfec_config {
+ s32 fec_id;
+ enum xsdfec_code code;
+ enum xsdfec_order order;
+ bool bypass;
+ bool code_wr_protect;
+ enum xsdfec_axis_width din_width;
+ enum xsdfec_axis_word_include din_word_include;
+ enum xsdfec_axis_width dout_width;
+ enum xsdfec_axis_word_include dout_word_include;
+ struct xsdfec_irq irq;
+};
+
+/**
+ * struct xsdfec_stats - Stats retrieved by ioctl XSDFEC_GET_STATS. Used
+ * to buffer atomic_t variables from struct
+ * xsdfec_dev. Counts are accumulated until
+ * the user clears them.
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ */
+struct xsdfec_stats {
+ u32 isr_err_count;
+ u32 cecc_count;
+ u32 uecc_count;
+};
+
+/**
+ * struct xsdfec_ldpc_param_table_sizes - Used to store sizes of SD-FEC table
+ *                                        entries for an individual LDPC code
+ * parameter.
+ * @sc_size: Size of SC table used
+ * @la_size: Size of LA table used
+ * @qc_size: Size of QC table used
+ */
+struct xsdfec_ldpc_param_table_sizes {
+ u32 sc_size;
+ u32 la_size;
+ u32 qc_size;
+};
+
+/**
+ * xsdfec_calculate_shared_ldpc_table_entry_size - Calculates shared code
+ * table sizes.
+ * @ldpc: Pointer to the LDPC Code Parameters
+ * @table_sizes: Pointer to structure containing the calculated table sizes
+ *
+ * Calculates the sizes of the shared LDPC code tables used by the specified
+ * LDPC code parameters.
+ */
+inline void xsdfec_calculate_shared_ldpc_table_entry_size(
+ struct xsdfec_ldpc_params *ldpc,
+ struct xsdfec_ldpc_param_table_sizes *table_sizes)
+{
+ /* Calculate the sc_size in 32 bit words */
+ table_sizes->sc_size = (ldpc->nlayers + 3) >> 2;
+ /* Calculate the la_size in 256 bit words */
+ table_sizes->la_size = ((ldpc->nlayers << 2) + 15) >> 4;
+ /* Calculate the qc_size in 256 bit words */
+ table_sizes->qc_size = ((ldpc->nqc << 2) + 15) >> 4;
+}
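+
+/*
+ * Sketch (illustrative only; the field values are arbitrary) of how an
+ * application might size the shared tables before adding an LDPC code:
+ *
+ *	struct xsdfec_ldpc_params ldpc = { .nlayers = 4, .nqc = 32 };
+ *	struct xsdfec_ldpc_param_table_sizes sizes;
+ *
+ *	xsdfec_calculate_shared_ldpc_table_entry_size(&ldpc, &sizes);
+ *
+ * sizes.sc_size, sizes.la_size and sizes.qc_size then hold the number of
+ * table words the code will occupy.
+ */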
+
+/*
+ * XSDFEC IOCTL List
+ */
+#define XSDFEC_MAGIC 'f'
+/**
+ * DOC: XSDFEC_START_DEV
+ *
+ * @Description
+ *
+ * ioctl to start SD-FEC core
+ *
+ * This fails if the XSDFEC_SET_ORDER ioctl has not been previously called
+ */
+#define XSDFEC_START_DEV _IO(XSDFEC_MAGIC, 0)
+/**
+ * DOC: XSDFEC_STOP_DEV
+ *
+ * @Description
+ *
+ * ioctl to stop the SD-FEC core
+ */
+#define XSDFEC_STOP_DEV _IO(XSDFEC_MAGIC, 1)
+/**
+ * DOC: XSDFEC_GET_STATUS
+ *
+ * @Description
+ *
+ * ioctl that returns status of SD-FEC core
+ */
+#define XSDFEC_GET_STATUS _IOR(XSDFEC_MAGIC, 2, struct xsdfec_status *)
+/**
+ * DOC: XSDFEC_SET_IRQ
+ * @Parameters
+ *
+ * @struct xsdfec_irq *
+ * Pointer to the &struct xsdfec_irq that contains the interrupt settings
+ * for the SD-FEC core
+ *
+ * @Description
+ *
+ * ioctl to enable or disable irq
+ */
+#define XSDFEC_SET_IRQ _IOW(XSDFEC_MAGIC, 3, struct xsdfec_irq *)
+/**
+ * DOC: XSDFEC_SET_TURBO
+ * @Parameters
+ *
+ * @struct xsdfec_turbo *
+ * Pointer to the &struct xsdfec_turbo that contains the Turbo decode
+ * settings for the SD-FEC core
+ *
+ * @Description
+ *
+ * ioctl that sets the SD-FEC Turbo parameter values
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_TURBO _IOW(XSDFEC_MAGIC, 4, struct xsdfec_turbo *)
+/**
+ * DOC: XSDFEC_ADD_LDPC_CODE_PARAMS
+ * @Parameters
+ *
+ * @struct xsdfec_ldpc_params *
+ * Pointer to the &struct xsdfec_ldpc_params that contains the LDPC code
+ * parameters to be added to the SD-FEC Block
+ *
+ * @Description
+ * ioctl to add an LDPC code to the SD-FEC LDPC codes
+ *
+ * This can only be used when:
+ *
+ * - Driver is in the XSDFEC_STOPPED state
+ *
+ * - SD-FEC core is configured as LDPC
+ *
+ * - SD-FEC Code Write Protection is disabled
+ */
+#define XSDFEC_ADD_LDPC_CODE_PARAMS \
+ _IOW(XSDFEC_MAGIC, 5, struct xsdfec_ldpc_params *)
+/**
+ * DOC: XSDFEC_GET_CONFIG
+ * @Parameters
+ *
+ * @struct xsdfec_config *
+ * Pointer to the &struct xsdfec_config that contains the current
+ * configuration settings of the SD-FEC Block
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core configuration
+ */
+#define XSDFEC_GET_CONFIG _IOR(XSDFEC_MAGIC, 6, struct xsdfec_config *)
+/**
+ * DOC: XSDFEC_GET_TURBO
+ * @Parameters
+ *
+ * @struct xsdfec_turbo *
+ * Pointer to the &struct xsdfec_turbo that contains the current Turbo
+ * decode settings of the SD-FEC Block
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC turbo param values
+ */
+#define XSDFEC_GET_TURBO _IOR(XSDFEC_MAGIC, 7, struct xsdfec_turbo *)
+/**
+ * DOC: XSDFEC_SET_ORDER
+ * @Parameters
+ *
+ * @struct unsigned long *
+ * Pointer to the unsigned long that contains a value from the
+ * @enum xsdfec_order
+ *
+ * @Description
+ *
+ * ioctl that sets order, if order of blocks can change from input to output
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_ORDER _IOW(XSDFEC_MAGIC, 8, unsigned long *)
+/**
+ * DOC: XSDFEC_SET_BYPASS
+ * @Parameters
+ *
+ * @struct bool *
+ *	Pointer to bool that sets the bypass value, where false results in
+ *	normal operation and true results in the SD-FEC performing the
+ *	configured operations (same number of cycles) but with the output
+ *	data matching the input data
+ *
+ * @Description
+ *
+ * ioctl that sets bypass.
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_BYPASS _IOW(XSDFEC_MAGIC, 9, bool *)
+/**
+ * DOC: XSDFEC_IS_ACTIVE
+ * @Parameters
+ *
+ * @struct bool *
+ * Pointer to bool that returns true if the SD-FEC is processing data
+ *
+ * @Description
+ *
+ * ioctl that determines if SD-FEC is processing data
+ */
+#define XSDFEC_IS_ACTIVE _IOR(XSDFEC_MAGIC, 10, bool *)
+/**
+ * DOC: XSDFEC_CLEAR_STATS
+ *
+ * @Description
+ *
+ * ioctl that clears error stats collected during interrupts
+ */
+#define XSDFEC_CLEAR_STATS _IO(XSDFEC_MAGIC, 11)
+/**
+ * DOC: XSDFEC_GET_STATS
+ * @Parameters
+ *
+ * @struct xsdfec_stats *
+ * Pointer to the &struct xsdfec_stats that will contain the updated stats
+ * values
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core stats
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_GET_STATS _IOR(XSDFEC_MAGIC, 12, struct xsdfec_stats *)
+/**
+ * DOC: XSDFEC_SET_DEFAULT_CONFIG
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core to default config, use after a reset
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_DEFAULT_CONFIG _IO(XSDFEC_MAGIC, 13)
+
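+/*
+ * Typical call sequence (illustrative only, error handling omitted; the
+ * device node name is an assumption):
+ *
+ *	unsigned long order = XSDFEC_MAINTAIN_ORDER;
+ *	struct xsdfec_config cfg;
+ *	int fd = open("/dev/xsdfec0", O_RDWR);
+ *
+ *	ioctl(fd, XSDFEC_GET_CONFIG, &cfg);
+ *	ioctl(fd, XSDFEC_SET_ORDER, &order);
+ *	ioctl(fd, XSDFEC_START_DEV);
+ *	... pass traffic ...
+ *	ioctl(fd, XSDFEC_STOP_DEV);
+ */
+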
+#endif /* __XILINX_SDFEC_H__ */
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 9ecfa37c7fbf..bb5dec762b69 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1103,12 +1103,12 @@ static int handle_ctrl_cmd(char *cmd)
case CTRL_P:
if (cmdptr != cmd_tail)
cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
- strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ strscpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
case CTRL_N:
if (cmdptr != cmd_head)
cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
- strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ strscpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
return 1;
}
return 0;
@@ -1315,7 +1315,7 @@ do_full_getstr:
if (*cmdbuf != '\n') {
if (*cmdbuf < 32) {
if (cmdptr == cmd_head) {
- strncpy(cmd_hist[cmd_head], cmd_cur,
+ strscpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
*(cmd_hist[cmd_head] +
strlen(cmd_hist[cmd_head])-1) = '\0';
@@ -1325,7 +1325,7 @@ do_full_getstr:
cmdbuf = cmd_cur;
goto do_full_getstr;
} else {
- strncpy(cmd_hist[cmd_head], cmd_cur,
+ strscpy(cmd_hist[cmd_head], cmd_cur,
CMD_BUFLEN);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 10fa8891be62..3de9cbee39e7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8383,7 +8383,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
- pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
+ pr_debug("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
goto done;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 2bed6589f41e..17ed11af2a13 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -138,10 +138,7 @@ static int bearer_name_validate(const char *name,
u32 if_len;
/* copy bearer name & ensure length is OK */
- name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
- /* need above in case non-Posix strncpy() doesn't pad with nulls */
- strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
- if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+ if (strscpy(name_copy, name, TIPC_MAX_BEARER_NAME) < 0)
return 0;
/* ensure all component parts of bearer name are present */
diff --git a/samples/xilinx_apm/Makefile b/samples/xilinx_apm/Makefile
new file mode 100644
index 000000000000..6182750c4479
--- /dev/null
+++ b/samples/xilinx_apm/Makefile
@@ -0,0 +1,71 @@
+#
+# 'make depend' uses makedepend to automatically generate dependencies
+# (dependencies are added to end of Makefile)
+# 'make' builds the executable file 'main'
+# 'make clean' removes all .o and executable files
+#
+
+# define the C compiler to use
+CC = $(CROSS_COMPILE)gcc
+
+# define any compile-time flags
+CFLAGS = -Wall -g
+
+# define any directories containing header files other than /usr/include
+#
+INCLUDES =
+
+# define library paths in addition to /usr/lib
+# if I wanted to include libraries not in /usr/lib I'd specify
+# their path using -Lpath, something like:
+LFLAGS =
+
+# define any libraries to link into executable:
+# if I want to link in libraries (libx.so or libx.a) I use the -llibname
+# option, something like this (which would link in libmylib.so and libm.so):
+LIBS = -lm
+
+# define the C source files
+SRCS = main.c xaxipmon.c
+
+# define the C object files
+#
+# This uses Suffix Replacement within a macro:
+# $(name:string1=string2)
+# For each word in 'name' replace 'string1' with 'string2'
+# Below we are replacing the suffix .c of all words in the macro SRCS
+# with the .o suffix
+#
+OBJS = $(SRCS:.c=.o)
+
+# define the executable file
+MAIN = main
+
+#
+# The following part of the makefile is generic; it can be used to
+# build any executable just by changing the definitions above and by
+# deleting dependencies appended to the file from 'make depend'
+#
+
+.PHONY: depend clean
+
+all: $(MAIN)
+ @echo Xilinx AXI Performance Monitor application compiled
+
+$(MAIN): $(OBJS)
+ $(CC) $(CFLAGS) $(INCLUDES) -o $(MAIN) $(OBJS) $(LFLAGS) $(LIBS)
+
+# this is a suffix replacement rule for building .o's from .c's
+# it uses automatic variables $<: the name of the prerequisite of
+# the rule(a .c file) and $@: the name of the target of the rule (a .o file)
+# (see the gnu make manual section about automatic variables)
+.c.o:
+ $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+clean:
+ $(RM) *.o *~ $(MAIN)
+
+depend: $(SRCS)
+ makedepend $(INCLUDES) $^
+
+# DO NOT DELETE THIS LINE -- make depend needs it
diff --git a/samples/xilinx_apm/main.c b/samples/xilinx_apm/main.c
new file mode 100644
index 000000000000..2a7eda4ab256
--- /dev/null
+++ b/samples/xilinx_apm/main.c
@@ -0,0 +1,134 @@
+/*
+ * Xilinx AXI Performance Monitor Example
+ *
+ * Copyright (c) 2013 Xilinx Inc.
+ *
+ * The code may be used by anyone for any purpose and can serve as a
+ * starting point for developing applications using Xilinx AXI
+ * Performance Monitor.
+ *
+ * This example, based on the Xilinx AXI Performance Monitor UIO driver,
+ * shows the sequence used to read metrics from the Xilinx AXI Performance
+ * Monitor IP. The user needs to provide the UIO device file with the -d
+ * option, e.g. "main -d /dev/uio0" if /dev/uio0 is the device file for the
+ * AXI Performance Monitor driver. The user need not clear the Interrupt
+ * Status Register after waiting for an interrupt on read, since the driver
+ * clears it.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <sys/msg.h>
+#include <sys/ipc.h>
+#include <stdint.h>
+#include "xaxipmon.h"
+
+#define MAP_SIZE 4096
+
+void usage(void)
+{
+ printf("*argv[0] -d <UIO_DEV_FILE> -i|-o <VALUE>\n");
+ printf(" -d UIO device file. e.g. /dev/uio0\n");
+ return;
+}
+
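+/*
+ * Configure metric counter 0 to count write beats on slot 2, set a fixed
+ * sample interval, and block on the UIO file descriptor until the
+ * sample-interval-counter overflow interrupt fires; the sampled count is
+ * then printed scaled by the driver-provided scale factor.
+ */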
+static void start(int fd)
+{
+ u8 slot = 2;
+ int tmp;
+ u32 isr;
+
+ setmetrics(slot, XAPM_METRIC_SET_4, XAPM_METRIC_COUNTER_0);
+ setsampleinterval(0x3FFFFFF);
+
+ loadsic();
+
+ intrenable(XAPM_IXR_SIC_OVERFLOW_MASK);
+
+ intrglobalenable();
+
+ enablemetricscounter();
+
+ enablesic();
+
+ isr = intrgetstatus();
+ /* Wait for SIC overflow interrupt */
+	if (read(fd, &tmp, 4) < 0)
+		perror("read");
+	/*
+	 * The driver clears the interrupt; the occurred interrupt status is
+	 * stored in params->isr.
+	 */
+ isr = intrgetstatus();
+ if (isr & XAPM_IXR_SIC_OVERFLOW_MASK)
+ disablesic();
+
+ disablemetricscounter();
+
+ intrdisable(XAPM_IXR_SIC_OVERFLOW_MASK);
+
+ intrglobaldisable();
+
+ printf("Required metrics: %u\n",
+ getsampledmetriccounter(XAPM_METRIC_COUNTER_0) *
+ params->scalefactor);
+}
+
+int main(int argc, char *argv[])
+{
+ int c;
+	char *uiod = NULL;
+ int fd;
+
+ while ((c = getopt(argc, argv, "d:h")) != -1) {
+ switch (c) {
+ case 'd':
+ uiod = optarg;
+ break;
+ case 'h':
+ usage();
+ return 0;
+ default:
+ printf("invalid option: %c\n", (char)c);
+ usage();
+ return -1;
+ }
+ }
+
+	if (!uiod) {
+		usage();
+		return -1;
+	}
+
+	/* Open the UIO device file */
+	fd = open(uiod, O_RDWR);
+	if (fd < 0) {
+		perror(argv[0]);
+		printf("Invalid UIO device file: %s\n", uiod);
+		usage();
+		return -1;
+	}
+
+	/* Map the APM register space (UIO map region 0) */
+	baseaddr = (ulong)mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE,
+			MAP_SHARED, fd, 0);
+	if ((u32 *)baseaddr == MAP_FAILED) {
+		perror("mmap of register space failed");
+		close(fd);
+		return -1;
+	}
+
+	/* Map the driver parameters (UIO map region 1) */
+	params = (struct xapm_param *)mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE,
+			MAP_SHARED, fd, getpagesize());
+	if (params == MAP_FAILED) {
+		perror("mmap of parameters failed");
+		munmap((u32 *)baseaddr, MAP_SIZE);
+		close(fd);
+		return -1;
+	}
+
+ if (params->mode == 1)
+ printf("AXI PMON is in Advanced Mode\n");
+ else if (params->mode == 2)
+ printf("AXI PMON is in Profile Mode\n");
+ else
+ printf("AXI PMON is in trace Mode\n");
+
+ start(fd);
+
+ close(fd);
+ munmap((u32 *)baseaddr, MAP_SIZE);
+ munmap(params, MAP_SIZE);
+
+ return 0;
+}
diff --git a/samples/xilinx_apm/xaxipmon.c b/samples/xilinx_apm/xaxipmon.c
new file mode 100644
index 000000000000..94a4e7511057
--- /dev/null
+++ b/samples/xilinx_apm/xaxipmon.c
@@ -0,0 +1,1269 @@
+#include "xaxipmon.h"
+/*****************************************************************************/
+/**
+*
+* This function resets all Metric Counters and Sampled Metric Counters of
+* AXI Performance Monitor.
+*
+* @return XST_SUCCESS
+*
+*
+* @note None.
+*
+******************************************************************************/
+int resetmetriccounter(void)
+{
+ u32 regval;
+
+ /*
+ * Write the reset value to the Control register to reset
+ * Metric counters
+ */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval | XAPM_CR_MCNTR_RESET_MASK));
+ /*
+ * Release from Reset
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval & ~(XAPM_CR_MCNTR_RESET_MASK)));
+ return XST_SUCCESS;
+
+}
+
+/*****************************************************************************/
+/**
+*
+* This function resets Global Clock Counter of AXI Performance Monitor
+*
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void resetglobalclkcounter(void)
+{
+
+ u32 regval;
+
+ /*
+ * Write the reset value to the Control register to reset
+ * Global Clock Counter
+ */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval | XAPM_CR_GCC_RESET_MASK));
+
+ /*
+ * Release from Reset
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval & ~(XAPM_CR_GCC_RESET_MASK)));
+
+}
+
+/*****************************************************************************/
+/**
+*
+* This function resets Streaming FIFO of AXI Performance Monitor
+*
+* @return XST_SUCCESS
+*
+* @note None.
+*
+******************************************************************************/
+int resetfifo(void)
+{
+ u32 regval;
+
+ /* Check Event Logging is enabled in Hardware */
+ if (params->eventlog == 0)
+		/* Event Logging not enabled in Hardware */
+ return XST_SUCCESS;
+
+ /*
+ * Write the reset value to the Control register to reset
+ * FIFO
+ */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval | XAPM_CR_FIFO_RESET_MASK));
+ /*
+ * Release from Reset
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ (regval & ~(XAPM_CR_FIFO_RESET_MASK)));
+
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Ranges for Incrementers depending on parameters passed.
+*
+* @param incrementer specifies the Incrementer for which Ranges
+* need to be set
+* @param rangehigh specifies the Upper limit in 32 bit Register
+* @param rangelow specifies the Lower limit in 32 bit Register
+*
+* @return None.
+*
+* @note None
+*
+*****************************************************************************/
+void setincrementerrange(u8 incrementer, u16 rangehigh, u16 rangelow)
+{
+ u32 regval;
+
+ /*
+ * Write to the specified Range register
+ */
+ regval = rangehigh << 16;
+ regval |= rangelow;
+ writereg(baseaddr,
+ (XAPM_RANGE0_OFFSET + (incrementer * 16)), regval);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the Ranges of Incrementers Registers.
+*
+* @param incrementer specifies the Incrementer for which Ranges
+* need to be returned.
+* @param rangehigh specifies the user reference variable which returns
+* the Upper Range Value of the specified Incrementer.
+* @param rangelow specifies the user reference variable which returns
+* the Lower Range Value of the specified Incrementer.
+*
+* @return None.
+*
+* @note None
+*
+*****************************************************************************/
+void getincrementerrange(u8 incrementer, u16 *rangehigh, u16 *rangelow)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_RANGE0_OFFSET +
+ (incrementer * 16)));
+
+ *rangelow = regval & 0xFFFF;
+ *rangehigh = (regval >> 16) & 0xFFFF;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets the Sample Interval Register
+*
+* @param sampleinterval is the Sample Interval
+*
+* @return None
+*
+* @note None.
+*
+*****************************************************************************/
+void setsampleinterval(u32 sampleinterval)
+{
+ /*
+ * Set Sample Interval
+ */
+ writereg(baseaddr, XAPM_SI_LOW_OFFSET, sampleinterval);
+
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of Sample Interval Register
+*
+* @param sampleinterval is a pointer where Sample Interval register
+* contents are returned.
+* @return None.
+*
+* @note None.
+*
+******************************************************************************/
+void getsampleinterval(u32 *sampleinterval)
+{
+ /*
+	 * Read Sample Interval
+ */
+ *sampleinterval = readreg(baseaddr, XAPM_SI_LOW_OFFSET);
+
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets metrics for specified Counter in the corresponding
+* Metric Selector Register.
+*
+* @param slot is the slot ID for which specified counter has to
+* be connected.
+* @param metrics is one of the Metric Sets. User has to use
+* XAPM_METRIC_SET_* macros in xaxipmon.h for this parameter
+* @param counter is the Counter Number.
+* The valid values are 0 to 9.
+*
+* @return XST_SUCCESS if Success
+* XST_FAILURE if Failure
+*
+* @note None.
+*
+*****************************************************************************/
+int setmetrics(u8 slot, u8 metrics, u8 counter)
+{
+ u32 regval;
+ u32 mask;
+
+ /* Find mask value to force zero in counternum byte range */
+ if (counter == 0 || counter == 4 || counter == 8)
+ mask = 0xFFFFFF00;
+ else if (counter == 1 || counter == 5 || counter == 9)
+ mask = 0xFFFF00FF;
+ else if (counter == 2 || counter == 6)
+ mask = 0xFF00FFFF;
+ else
+ mask = 0x00FFFFFF;
+
+ if (counter <= 3) {
+ regval = readreg(baseaddr, XAPM_MSR0_OFFSET);
+ regval = regval & mask;
+ regval = regval | (metrics << (counter * 8));
+ regval = regval | (slot << (counter * 8 + 5));
+ writereg(baseaddr, XAPM_MSR0_OFFSET, regval);
+ } else if ((counter >= 4) && (counter <= 7)) {
+ counter = counter - 4;
+ regval = readreg(baseaddr, XAPM_MSR1_OFFSET);
+ regval = regval & mask;
+ regval = regval | (metrics << (counter * 8));
+ regval = regval | (slot << (counter * 8 + 5));
+ writereg(baseaddr, XAPM_MSR1_OFFSET, regval);
+ } else {
+ counter = counter - 8;
+ regval = readreg(baseaddr, XAPM_MSR2_OFFSET);
+
+ regval = regval & mask;
+ regval = regval | (metrics << (counter * 8));
+ regval = regval | (slot << (counter * 8 + 5));
+ writereg(baseaddr, XAPM_MSR2_OFFSET, regval);
+ }
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns metrics in the specified Counter from the corresponding
+* Metric Selector Register.
+*
+* @param counter is the Counter Number.
+* The valid values are 0 to 9.
+* @param	metrics is a reference parameter in which the metrics of the
+*		specified counter are filled.
+* @param	slot is a reference parameter in which the slot ID of the
+*		specified counter is filled.
+* @return XST_SUCCESS if Success
+* XST_FAILURE if Failure
+*
+* @note None.
+*
+*****************************************************************************/
+int getmetrics(u8 counter, u8 *metrics, u8 *slot)
+{
+ u32 regval;
+
+ if (counter <= 3) {
+ regval = readreg(baseaddr, XAPM_MSR0_OFFSET);
+ *metrics = (regval >> (counter * 8)) & 0x1F;
+ *slot = (regval >> (counter * 8 + 5)) & 0x7;
+ } else if ((counter >= 4) && (counter <= 7)) {
+ counter = counter - 4;
+ regval = readreg(baseaddr, XAPM_MSR1_OFFSET);
+ *metrics = (regval >> (counter * 8)) & 0x1F;
+ *slot = (regval >> (counter * 8 + 5)) & 0x7;
+ } else {
+ counter = counter - 8;
+ regval = readreg(baseaddr, XAPM_MSR2_OFFSET);
+ *metrics = (regval >> (counter * 8)) & 0x1F;
+ *slot = (regval >> (counter * 8 + 5)) & 0x7;
+ }
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Global Clock Counter Register.
+*
+* @param cnthigh is the user space pointer with which upper 32 bits
+* of Global Clock Counter has to be filled
+* @param cntlow is the user space pointer with which lower 32 bits
+* of Global Clock Counter has to be filled
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void getglobalclkcounter(u32 *cnthigh, u32 *cntlow)
+{
+ *cnthigh = 0x0;
+ *cntlow = 0x0;
+
+ /*
+	 * If the counter width is 64 bits, the upper 32 bits of the counter
+	 * have to be filled in at the cnthigh address as well.
+ */
+ if (params->globalcntwidth == 64) {
+ /* Bits[63:32] exists at XAPM_GCC_HIGH_OFFSET */
+ *cnthigh = readreg(baseaddr, XAPM_GCC_HIGH_OFFSET);
+ }
+ /* Bits[31:0] exists at XAPM_GCC_LOW_OFFSET */
+ *cntlow = readreg(baseaddr, XAPM_GCC_LOW_OFFSET);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Metric Counter Register.
+*
+* @param counter is the number of the Metric Counter to be read.
+* Use the XAPM_METRIC_COUNTER* defines for the counter number in
+* xaxipmon.h. The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+* @return regval is the content of specified Metric Counter.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getmetriccounter(u32 counter)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr,
+ (XAPM_MC0_OFFSET + (counter * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Sampled Metric Counter Register.
+*
+* @param counter is the number of the Sampled Metric Counter to read.
+* Use the XAPM_METRIC_COUNTER* defines for the counter number in
+* xaxipmon.h. The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+*
+* @return regval is the content of specified Sampled Metric Counter.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getsampledmetriccounter(u32 counter)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_SMC0_OFFSET +
+ (counter * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Incrementer Register.
+*
+* @param	incrementer is the number of the Incrementer register to
+*		read. Use the XAPM_INCREMENTER_* defines for the Incrementer
+*		number. The valid values are 0 (XAPM_INCREMENTER_0) to
+*		9 (XAPM_INCREMENTER_9).
+* @return	regval is the content of the specified Incrementer register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getincrementer(u32 incrementer)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_INC0_OFFSET +
+ (incrementer * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the contents of the Sampled Incrementer Register.
+*
+* @param	incrementer is the number of the Sampled Incrementer
+*		register to read. Use the XAPM_INCREMENTER_* defines for the
+*		Incrementer number. The valid values are 0 (XAPM_INCREMENTER_0)
+*		to 9 (XAPM_INCREMENTER_9).
+* @return	regval is the content of the specified Sampled Incrementer
+*		register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getsampledincrementer(u32 incrementer)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, (XAPM_SINC0_OFFSET +
+ (incrementer * 16)));
+ return regval;
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Software-written Data Register.
+*
+* @param swdata is the Software written Data.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setswdatareg(u32 swdata)
+{
+ /*
+ * Set Software-written Data Register
+ */
+ writereg(baseaddr, XAPM_SWD_OFFSET, swdata);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns contents of Software-written Data Register.
+*
+* @return swdata.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getswdatareg(void)
+{
+ u32 swdata;
+
+ /*
+	 * Read Software-written Data Register
+ */
+ swdata = (u32)readreg(baseaddr, XAPM_SWD_OFFSET);
+ return swdata;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables the following in the AXI Performance Monitor:
+* - Event logging
+*
+* @param flagenables is a value to write to the flag enables
+* register defined by XAPM_FEC_OFFSET. It is recommended
+* to use the XAPM_FEC_*_MASK mask bits to generate.
+* A value of 0x0 will disable all events to the event
+* log streaming FIFO.
+*
+* @return XST_SUCCESS
+*
+* @note None
+*
+******************************************************************************/
+int starteventlog(u32 flagenables)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ /* Now write to flag enables register */
+ writereg(baseaddr, XAPM_FEC_OFFSET, flagenables);
+ /* Write the new value to the Control register to
+ * enable event logging */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_EVENTLOG_ENABLE_MASK);
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function disables the following in the AXI Performance Monitor:
+* - Event logging
+*
+* @return XST_SUCCESS
+*
+* @note None
+*
+******************************************************************************/
+int stopeventlog(void)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ /* Write the new value to the Control register to disable
+ * event logging */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~XAPM_CR_EVENTLOG_ENABLE_MASK);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables the following in the AXI Performance Monitor:
+* - Global clock counter
+* - All metric counters
+* - All sampled metric counters
+*
+* @param sampleinterval is the sample interval
+* @return XST_SUCCESS
+*
+* @note None
+******************************************************************************/
+int startcounters(u32 sampleinterval)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ /* Global Clock Counter is present in Advanced Mode only */
+ if (params->mode == 1)
+ regval = regval | XAPM_CR_GCC_ENABLE_MASK;
+ /*
+ * Write the new value to the Control register to enable
+ * global clock counter and metric counters
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval | XAPM_CR_MCNTR_ENABLE_MASK);
+
+ /* Set, enable, and load sampled counters */
+ setsampleinterval(sampleinterval);
+ loadsic();
+ enablesic();
+
+ return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the following in the AXI Performance Monitor:
+* - Global clock counter
+* - All metric counters
+*
+* @return XST_SUCCESS
+*
+* @note None
+*
+******************************************************************************/
+int stopcounters(void)
+{
+ u32 regval;
+
+ /* Read current register value */
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ /* Global Clock Counter is present in Advanced Mode only */
+ if (params->mode == 1)
+ regval = regval & ~XAPM_CR_GCC_ENABLE_MASK;
+
+ /*
+ * Write the new value to the Control register to disable
+ * global clock counter and metric counters
+ */
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~XAPM_CR_MCNTR_ENABLE_MASK);
+
+ return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables Metric Counters.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enablemetricscounter(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_MCNTR_ENABLE_MASK);
+}
+/****************************************************************************/
+/**
+*
+* This function disables the Metric Counters.
+*
+* @return None
+*
+* @note None
+*
+*****************************************************************************/
+void disablemetricscounter(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~(XAPM_CR_MCNTR_ENABLE_MASK));
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets the Upper and Lower Ranges for the specified Metric
+* Counter Log Enable Register. Event Logging starts when the corresponding
+* Metric Counter value falls between these ranges.
+*
+* @param counter is the Metric Counter number for which
+*		Ranges are to be assigned. Use the XAPM_METRIC_COUNTER*
+* defines for the counter number in xaxipmon.h.
+* The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+* @param rangehigh specifies the Upper limit in 32 bit Register
+* @param rangelow specifies the Lower limit in 32 bit Register
+* @return None
+*
+* @note None.
+*
+*****************************************************************************/
+void setlogenableranges(u32 counter, u16 rangehigh, u16 rangelow)
+{
+ u32 regval;
+
+ /*
+ * Write the specified Ranges to corresponding Metric Counter Log
+ * Enable Register
+ */
+ regval = rangehigh << 16;
+ regval |= rangelow;
+ writereg(baseaddr, (XAPM_MC0LOGEN_OFFSET +
+ (counter * 16)), regval);
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns the Ranges of specified Metric Counter Log
+* Enable Register.
+*
+* @param counter is the Metric Counter number for which
+*		Ranges are to be returned. Use the XAPM_METRIC_COUNTER*
+* defines for the counter number in xaxipmon.h.
+* The valid values are 0 (XAPM_METRIC_COUNTER_0) to
+* 9 (XAPM_METRIC_COUNTER_9).
+*
+* @param rangehigh specifies the user reference variable which returns
+* the Upper Range Value of the specified Metric Counter
+* Log Enable Register.
+* @param rangelow specifies the user reference variable which returns
+* the Lower Range Value of the specified Metric Counter
+* Log Enable Register.
+*
+* @note None.
+*
+*****************************************************************************/
+void getlogenableranges(u32 counter, u16 *rangehigh, u16 *rangelow)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr,
+ (XAPM_MC0LOGEN_OFFSET + (counter * 16)));
+
+ *rangelow = regval & 0xFFFF;
+ *rangehigh = (regval >> 16) & 0xFFFF;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables Event Logging.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enableeventlog(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_EVENTLOG_ENABLE_MASK);
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables External trigger pulse so that Metric Counters can be
+* started on external trigger pulse for a slot.
+*
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enablemctrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_MCNTR_EXTTRIGGER_MASK);
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the External trigger pulse used to start Metric
+* Counters on external trigger pulse for a slot.
+*
+* @return None
+*
+* @note None
+*
+*****************************************************************************/
+void disablemctrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~(XAPM_CR_MCNTR_EXTTRIGGER_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function enables External trigger pulse for Event Log
+* so that Event Logging can be started on external trigger pulse for a slot.
+*
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void enableeventlogtrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval | XAPM_CR_EVTLOG_EXTTRIGGER_MASK);
+}
+
+/****************************************************************************/
+/**
+*
+* This function disables the External trigger pulse used to start Event
+* Log on external trigger pulse for a slot.
+*
+* @return None
+*
+* @note None
+*
+*****************************************************************************/
+void disableeventlogtrigger(void)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+ writereg(baseaddr, XAPM_CTL_OFFSET,
+ regval & ~(XAPM_CR_EVTLOG_EXTTRIGGER_MASK));
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns a name for a given Metric.
+*
+* @param metrics is one of the Metric Sets. User has to use
+* XAPM_METRIC_SET_* macros in xaxipmon.h for this parameter
+*
+* @return const char *
+*
+* @note None
+*
+*****************************************************************************/
+const char *getmetricname(u8 metrics)
+{
+ if (metrics == XAPM_METRIC_SET_0)
+ return "Write Transaction Count";
+ if (metrics == XAPM_METRIC_SET_1)
+ return "Read Transaction Count";
+ if (metrics == XAPM_METRIC_SET_2)
+ return "Write Byte Count";
+ if (metrics == XAPM_METRIC_SET_3)
+ return "Read Byte Count";
+ if (metrics == XAPM_METRIC_SET_4)
+ return "Write Beat Count";
+ if (metrics == XAPM_METRIC_SET_5)
+ return "Total Read Latency";
+ if (metrics == XAPM_METRIC_SET_6)
+ return "Total Write Latency";
+ if (metrics == XAPM_METRIC_SET_7)
+ return "Slv_Wr_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_8)
+ return "Mst_Rd_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_9)
+ return "Num_BValids";
+ if (metrics == XAPM_METRIC_SET_10)
+ return "Num_WLasts";
+ if (metrics == XAPM_METRIC_SET_11)
+ return "Num_RLasts";
+ if (metrics == XAPM_METRIC_SET_12)
+ return "Minimum Write Latency";
+ if (metrics == XAPM_METRIC_SET_13)
+ return "Maximum Write Latency";
+ if (metrics == XAPM_METRIC_SET_14)
+ return "Minimum Read Latency";
+ if (metrics == XAPM_METRIC_SET_15)
+ return "Maximum Read Latency";
+ if (metrics == XAPM_METRIC_SET_16)
+ return "Transfer Cycle Count";
+ if (metrics == XAPM_METRIC_SET_17)
+ return "Packet Count";
+ if (metrics == XAPM_METRIC_SET_18)
+ return "Data Byte Count";
+ if (metrics == XAPM_METRIC_SET_19)
+ return "Position Byte Count";
+ if (metrics == XAPM_METRIC_SET_20)
+ return "Null Byte Count";
+ if (metrics == XAPM_METRIC_SET_21)
+ return "Slv_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_22)
+ return "Mst_Idle_Cnt";
+ if (metrics == XAPM_METRIC_SET_30)
+ return "External event count";
+ return "Unsupported";
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Write ID in Latency ID register to capture Write
+* Latency metrics.
+*
+* @param writeid is the Write ID to be written in Latency ID register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setwriteid(u32 writeid)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ regval = regval & ~(XAPM_ID_WID_MASK);
+ regval = regval | writeid;
+ writereg(baseaddr, XAPM_ID_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_ID_OFFSET, writeid);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Read ID in Latency ID register to capture
+* Read Latency metrics.
+*
+* @param readid is the Read ID to be written in Latency ID register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setreadid(u32 readid)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ regval = regval & ~(XAPM_ID_RID_MASK);
+ regval = regval | (readid << 16);
+ writereg(baseaddr, XAPM_ID_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_RID_OFFSET, readid);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Write ID in Latency ID register.
+*
+* @return writeid is the required Write ID in Latency ID register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getwriteid(void)
+{
+
+ u32 writeid;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ writeid = regval & XAPM_ID_WID_MASK;
+ } else {
+		writeid = readreg(baseaddr, XAPM_ID_OFFSET);
+ }
+
+ return writeid;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Read ID in Latency ID register.
+*
+* @return readid is the required Read ID in Latency ID register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getreadid(void)
+{
+
+ u32 readid;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_ID_OFFSET);
+ regval = regval & XAPM_ID_RID_MASK;
+ readid = regval >> 16;
+ } else {
+		readid = readreg(baseaddr, XAPM_RID_OFFSET);
+ }
+
+ return readid;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency Start point to calculate write latency.
+*
+* @param	param can be 0 - XAPM_LATENCY_ADDR_ISSUE
+* or 1 - XAPM_LATENCY_ADDR_ACCEPT
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setwrlatencystart(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_ADDR_ACCEPT)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_WRLATENCY_START_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_WRLATENCY_START_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency End point to calculate write latency.
+*
+* @param	param can be 0 - XAPM_LATENCY_LASTWR
+* or 1 - XAPM_LATENCY_FIRSTWR
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setwrlatencyend(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_FIRSTWR)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_WRLATENCY_END_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_WRLATENCY_END_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency Start point to calculate read latency.
+*
+* @param	param can be 0 - XAPM_LATENCY_ADDR_ISSUE
+* or 1 - XAPM_LATENCY_ADDR_ACCEPT
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setrdlatencystart(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_ADDR_ACCEPT)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_RDLATENCY_START_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_RDLATENCY_START_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function sets Latency End point to calculate read latency.
+*
+* @param	param can be 0 - XAPM_LATENCY_LASTRD
+* or 1 - XAPM_LATENCY_FIRSTRD
+* @return None
+*
+* @note None
+*
+******************************************************************************/
+void setrdlatencyend(u8 param)
+{
+ u32 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ if (param == XAPM_LATENCY_FIRSTRD)
+ writereg(baseaddr, XAPM_CTL_OFFSET, regval |
+ XAPM_CR_RDLATENCY_END_MASK);
+ else
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr,
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_RDLATENCY_END_MASK));
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns Write Latency Start point.
+*
+* @return Returns 0 - XAPM_LATENCY_ADDR_ISSUE or
+* 1 - XAPM_LATENCY_ADDR_ACCEPT
+*
+* @note None
+*
+******************************************************************************/
+u8 getwrlatencystart(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_WRLATENCY_START_MASK;
+ if (regval != XAPM_LATENCY_ADDR_ISSUE)
+ return XAPM_LATENCY_ADDR_ACCEPT;
+ else
+ return XAPM_LATENCY_ADDR_ISSUE;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns Write Latency End point.
+*
+* @return Returns 0 - XAPM_LATENCY_LASTWR or
+* 1 - XAPM_LATENCY_FIRSTWR.
+*
+* @note None
+*
+******************************************************************************/
+u8 getwrlatencyend(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_WRLATENCY_END_MASK;
+ if (regval != XAPM_LATENCY_LASTWR)
+ return XAPM_LATENCY_FIRSTWR;
+ else
+ return XAPM_LATENCY_LASTWR;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns read Latency Start point.
+*
+* @return Returns 0 - XAPM_LATENCY_ADDR_ISSUE or
+* 1 - XAPM_LATENCY_ADDR_ACCEPT
+*
+* @note None
+*
+******************************************************************************/
+u8 getrdlatencystart(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_RDLATENCY_START_MASK;
+
+ if (regval != XAPM_LATENCY_ADDR_ISSUE)
+ return XAPM_LATENCY_ADDR_ACCEPT;
+ else
+ return XAPM_LATENCY_ADDR_ISSUE;
+}
+
+/*****************************************************************************/
+/**
+*
+* This function returns Read Latency End point.
+*
+* @return Returns 0 - XAPM_LATENCY_LASTRD or
+* 1 - XAPM_LATENCY_FIRSTRD.
+*
+* @note None
+*
+******************************************************************************/
+u8 getrdlatencyend(void)
+{
+ u8 regval;
+
+ regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+ regval = regval & XAPM_CR_RDLATENCY_END_MASK;
+ if (regval != XAPM_LATENCY_LASTRD)
+ return XAPM_LATENCY_FIRSTRD;
+ else
+ return XAPM_LATENCY_LASTRD;
+}
+
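+/*****************************************************************************/
+/**
+*
+* Usage sketch (illustrative only, not part of the sample): measure the
+* write latency from address acceptance to the first write beat by
+* combining the setters above. The chosen start/end points are just an
+* example configuration.
+*
+*	setwrlatencystart(XAPM_LATENCY_ADDR_ACCEPT);
+*	setwrlatencyend(XAPM_LATENCY_FIRSTWR);
+*
+******************************************************************************/
+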
+/****************************************************************************/
+/**
+*
+* This function sets Write ID Mask in ID Mask register.
+*
+* @param wrmask is the Write ID mask to be written in ID register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setwriteidmask(u32 wrmask)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ regval = regval & ~(XAPM_MASKID_WID_MASK);
+ regval = regval | wrmask;
+ writereg(baseaddr, XAPM_IDMASK_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_IDMASK_OFFSET, wrmask);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets Read ID Mask in ID Mask register.
+*
+* @param rdmask is the Read ID mask to be written in ID Mask register.
+*
+* @return None.
+*
+* @note None.
+*
+*****************************************************************************/
+void setreadidmask(u32 rdmask)
+{
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ regval = regval & ~(XAPM_MASKID_RID_MASK);
+ regval = regval | (rdmask << 16);
+ writereg(baseaddr, XAPM_IDMASK_OFFSET, regval);
+ } else {
+ writereg(baseaddr, XAPM_RIDMASK_OFFSET, rdmask);
+ }
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Write ID Mask in ID Mask register.
+*
+* @return wrmask is the required Write ID Mask in ID Mask register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getwriteidmask(void)
+{
+ u32 wrmask;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ wrmask = regval & XAPM_MASKID_WID_MASK;
+ } else {
+ wrmask = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ }
+ return wrmask;
+}
+
+/****************************************************************************/
+/**
+*
+* This function returns Read ID Mask in ID Mask register.
+*
+* @return rdmask is the required Read ID Mask in ID Mask register.
+*
+* @note None.
+*
+*****************************************************************************/
+u32 getreadidmask(void)
+{
+ u32 rdmask;
+ u32 regval;
+
+ if (params->is_32bit_filter == 0) {
+ regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+ regval = regval & XAPM_MASKID_RID_MASK;
+ rdmask = regval >> 16;
+ } else {
+ rdmask = readreg(baseaddr, XAPM_RIDMASK_OFFSET);
+ }
+ return rdmask;
+ return rdmask;
+}
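+
+/****************************************************************************/
+/**
+*
+* Usage sketch (illustrative only): restrict monitoring to one AXI ID by
+* programming the mask helpers above and turning on ID filtering. The
+* mask value 0xFF is a hypothetical example.
+*
+*	setwriteidmask(0xFF);
+*	setreadidmask(0xFF);
+*	enableidfilter();
+*
+*****************************************************************************/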
diff --git a/samples/xilinx_apm/xaxipmon.h b/samples/xilinx_apm/xaxipmon.h
new file mode 100644
index 000000000000..85e0e902a1c5
--- /dev/null
+++ b/samples/xilinx_apm/xaxipmon.h
@@ -0,0 +1,943 @@
+#ifndef XAXIPMON_H /* Prevent circular inclusions */
+#define XAXIPMON_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdbool.h>
+
+
+#define XST_SUCCESS 0
+#define XST_FAILURE 1
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define XAPM_GCC_HIGH_OFFSET 0x0000 /* Global Clock Counter
+ 32 to 63 bits */
+#define XAPM_GCC_LOW_OFFSET 0x0004 /* Global Clock Counter Lower
+ 0-31 bits */
+#define XAPM_SI_HIGH_OFFSET 0x0020 /* Sample Interval MSB */
+#define XAPM_SI_LOW_OFFSET 0x0024 /* Sample Interval LSB */
+#define XAPM_SICR_OFFSET 0x0028 /* Sample Interval Control
+ Register */
+#define XAPM_SR_OFFSET 0x002C /* Sample Register */
+#define XAPM_GIE_OFFSET 0x0030 /* Global Interrupt Enable
+ Register */
+#define XAPM_IE_OFFSET 0x0034 /* Interrupt Enable Register */
+#define XAPM_IS_OFFSET 0x0038 /* Interrupt Status Register */
+
+#define XAPM_MSR0_OFFSET 0x0044 /* Metric Selector 0 Register*/
+#define XAPM_MSR1_OFFSET 0x0048 /* Metric Selector 1 Register*/
+#define XAPM_MSR2_OFFSET 0x004C /* Metric Selector 2 Register*/
+
+#define XAPM_MC0_OFFSET 0x0100 /* Metric Counter 0 Register */
+#define XAPM_INC0_OFFSET 0x0104 /* Incrementer 0 Register */
+#define XAPM_RANGE0_OFFSET 0x0108 /* Range 0 Register */
+#define XAPM_MC0LOGEN_OFFSET 0x010C /* Metric Counter 0
+ Log Enable Register */
+#define XAPM_MC1_OFFSET 0x0110 /* Metric Counter 1 Register */
+#define XAPM_INC1_OFFSET 0x0114 /* Incrementer 1 Register */
+#define XAPM_RANGE1_OFFSET 0x0118 /* Range 1 Register */
+#define XAPM_MC1LOGEN_OFFSET 0x011C /* Metric Counter 1
+ Log Enable Register */
+#define XAPM_MC2_OFFSET 0x0120 /* Metric Counter 2 Register */
+#define XAPM_INC2_OFFSET 0x0124 /* Incrementer 2 Register */
+#define XAPM_RANGE2_OFFSET 0x0128 /* Range 2 Register */
+#define XAPM_MC2LOGEN_OFFSET 0x012C /* Metric Counter 2
+ Log Enable Register */
+#define XAPM_MC3_OFFSET 0x0130 /* Metric Counter 3 Register */
+#define XAPM_INC3_OFFSET 0x0134 /* Incrementer 3 Register */
+#define XAPM_RANGE3_OFFSET 0x0138 /* Range 3 Register */
+#define XAPM_MC3LOGEN_OFFSET 0x013C /* Metric Counter 3
+ Log Enable Register */
+#define XAPM_MC4_OFFSET 0x0140 /* Metric Counter 4 Register */
+#define XAPM_INC4_OFFSET 0x0144 /* Incrementer 4 Register */
+#define XAPM_RANGE4_OFFSET 0x0148 /* Range 4 Register */
+#define XAPM_MC4LOGEN_OFFSET 0x014C /* Metric Counter 4
+ Log Enable Register */
+#define XAPM_MC5_OFFSET 0x0150 /* Metric Counter 5
+ Register */
+#define XAPM_INC5_OFFSET 0x0154 /* Incrementer 5 Register */
+#define XAPM_RANGE5_OFFSET 0x0158 /* Range 5 Register */
+#define XAPM_MC5LOGEN_OFFSET 0x015C /* Metric Counter 5
+ Log Enable Register */
+#define XAPM_MC6_OFFSET 0x0160 /* Metric Counter 6
+ Register */
+#define XAPM_INC6_OFFSET 0x0164 /* Incrementer 6 Register */
+#define XAPM_RANGE6_OFFSET 0x0168 /* Range 6 Register */
+#define XAPM_MC6LOGEN_OFFSET 0x016C /* Metric Counter 6
+ Log Enable Register */
+#define XAPM_MC7_OFFSET 0x0170 /* Metric Counter 7
+ Register */
+#define XAPM_INC7_OFFSET 0x0174 /* Incrementer 7 Register */
+#define XAPM_RANGE7_OFFSET 0x0178 /* Range 7 Register */
+#define XAPM_MC7LOGEN_OFFSET 0x017C /* Metric Counter 7
+ Log Enable Register */
+#define XAPM_MC8_OFFSET 0x0180 /* Metric Counter 8
+ Register */
+#define XAPM_INC8_OFFSET 0x0184 /* Incrementer 8 Register */
+#define XAPM_RANGE8_OFFSET 0x0188 /* Range 8 Register */
+#define XAPM_MC8LOGEN_OFFSET 0x018C /* Metric Counter 8
+ Log Enable Register */
+#define XAPM_MC9_OFFSET 0x0190 /* Metric Counter 9
+ Register */
+#define XAPM_INC9_OFFSET 0x0194 /* Incrementer 9 Register */
+#define XAPM_RANGE9_OFFSET 0x0198 /* Range 9 Register */
+#define XAPM_MC9LOGEN_OFFSET 0x019C /* Metric Counter 9
+ Log Enable Register */
+
+#define XAPM_MC10_OFFSET 0x01A0 /* Metric Counter 10
+ Register */
+#define XAPM_MC11_OFFSET 0x01B0 /* Metric Counter 11
+ Register */
+#define XAPM_MC12_OFFSET 0x0500 /* Metric Counter 12
+ Register */
+#define XAPM_MC13_OFFSET 0x0510 /* Metric Counter 13
+ Register */
+#define XAPM_MC14_OFFSET 0x0520 /* Metric Counter 14
+ Register */
+#define XAPM_MC15_OFFSET 0x0530 /* Metric Counter 15
+ Register */
+#define XAPM_MC16_OFFSET 0x0540 /* Metric Counter 16
+ Register */
+#define XAPM_MC17_OFFSET 0x0550 /* Metric Counter 17
+ Register */
+#define XAPM_MC18_OFFSET 0x0560 /* Metric Counter 18
+ Register */
+#define XAPM_MC19_OFFSET 0x0570 /* Metric Counter 19
+ Register */
+#define XAPM_MC20_OFFSET 0x0580 /* Metric Counter 20
+ Register */
+#define XAPM_MC21_OFFSET 0x0590 /* Metric Counter 21
+ Register */
+#define XAPM_MC22_OFFSET 0x05A0 /* Metric Counter 22
+ Register */
+#define XAPM_MC23_OFFSET 0x05B0 /* Metric Counter 23
+ Register */
+#define XAPM_MC24_OFFSET 0x0700 /* Metric Counter 24
+ Register */
+#define XAPM_MC25_OFFSET 0x0710 /* Metric Counter 25
+ Register */
+#define XAPM_MC26_OFFSET 0x0720 /* Metric Counter 26
+ Register */
+#define XAPM_MC27_OFFSET 0x0730 /* Metric Counter 27
+ Register */
+#define XAPM_MC28_OFFSET 0x0740 /* Metric Counter 28
+ Register */
+#define XAPM_MC29_OFFSET 0x0750 /* Metric Counter 29
+ Register */
+#define XAPM_MC30_OFFSET 0x0760 /* Metric Counter 30
+ Register */
+#define XAPM_MC31_OFFSET 0x0770 /* Metric Counter 31
+ Register */
+#define XAPM_MC32_OFFSET 0x0780 /* Metric Counter 32
+ Register */
+#define XAPM_MC33_OFFSET 0x0790 /* Metric Counter 33
+ Register */
+#define XAPM_MC34_OFFSET 0x07A0 /* Metric Counter 34
+ Register */
+#define XAPM_MC35_OFFSET 0x07B0 /* Metric Counter 35
+ Register */
+#define XAPM_MC36_OFFSET 0x0900 /* Metric Counter 36
+ Register */
+#define XAPM_MC37_OFFSET 0x0910 /* Metric Counter 37
+ Register */
+#define XAPM_MC38_OFFSET 0x0920 /* Metric Counter 38
+ Register */
+#define XAPM_MC39_OFFSET 0x0930 /* Metric Counter 39
+ Register */
+#define XAPM_MC40_OFFSET 0x0940 /* Metric Counter 40
+ Register */
+#define XAPM_MC41_OFFSET 0x0950 /* Metric Counter 41
+ Register */
+#define XAPM_MC42_OFFSET 0x0960 /* Metric Counter 42
+ Register */
+#define XAPM_MC43_OFFSET 0x0970 /* Metric Counter 43
+ Register */
+#define XAPM_MC44_OFFSET 0x0980 /* Metric Counter 44
+ Register */
+#define XAPM_MC45_OFFSET 0x0990 /* Metric Counter 45
+ Register */
+#define XAPM_MC46_OFFSET 0x09A0 /* Metric Counter 46
+ Register */
+#define XAPM_MC47_OFFSET 0x09B0 /* Metric Counter 47
+ Register */
+
+#define XAPM_SMC0_OFFSET 0x0200 /* Sampled Metric Counter
+ 0 Register */
+#define XAPM_SINC0_OFFSET 0x0204 /* Sampled Incrementer
+ 0 Register */
+#define XAPM_SMC1_OFFSET 0x0210 /* Sampled Metric Counter
+ 1 Register */
+#define XAPM_SINC1_OFFSET 0x0214 /* Sampled Incrementer
+ 1 Register */
+#define XAPM_SMC2_OFFSET 0x0220 /* Sampled Metric Counter
+ 2 Register */
+#define XAPM_SINC2_OFFSET 0x0224 /* Sampled Incrementer
+ 2 Register */
+#define XAPM_SMC3_OFFSET 0x0230 /* Sampled Metric Counter
+ 3 Register */
+#define XAPM_SINC3_OFFSET 0x0234 /* Sampled Incrementer
+ 3 Register */
+#define XAPM_SMC4_OFFSET 0x0240 /* Sampled Metric Counter
+ 4 Register */
+#define XAPM_SINC4_OFFSET 0x0244 /* Sampled Incrementer
+ 4 Register */
+#define XAPM_SMC5_OFFSET 0x0250 /* Sampled Metric Counter
+ 5 Register */
+#define XAPM_SINC5_OFFSET 0x0254 /* Sampled Incrementer
+ 5 Register */
+#define XAPM_SMC6_OFFSET 0x0260 /* Sampled Metric Counter
+ 6 Register */
+#define XAPM_SINC6_OFFSET 0x0264 /* Sampled Incrementer
+ 6 Register */
+#define XAPM_SMC7_OFFSET 0x0270 /* Sampled Metric Counter
+ 7 Register */
+#define XAPM_SINC7_OFFSET 0x0274 /* Sampled Incrementer
+ 7 Register */
+#define XAPM_SMC8_OFFSET 0x0280 /* Sampled Metric Counter
+ 8 Register */
+#define XAPM_SINC8_OFFSET 0x0284 /* Sampled Incrementer
+ 8 Register */
+#define XAPM_SMC9_OFFSET 0x0290 /* Sampled Metric Counter
+ 9 Register */
+#define XAPM_SINC9_OFFSET 0x0294 /* Sampled Incrementer
+ 9 Register */
+#define XAPM_SMC10_OFFSET 0x02A0 /* Sampled Metric Counter
+ 10 Register */
+#define XAPM_SMC11_OFFSET 0x02B0 /* Sampled Metric Counter
+ 11 Register */
+#define XAPM_SMC12_OFFSET 0x0600 /* Sampled Metric Counter
+ 12 Register */
+#define XAPM_SMC13_OFFSET 0x0610 /* Sampled Metric Counter
+ 13 Register */
+#define XAPM_SMC14_OFFSET 0x0620 /* Sampled Metric Counter
+ 14 Register */
+#define XAPM_SMC15_OFFSET 0x0630 /* Sampled Metric Counter
+ 15 Register */
+#define XAPM_SMC16_OFFSET 0x0640 /* Sampled Metric Counter
+ 16 Register */
+#define XAPM_SMC17_OFFSET 0x0650 /* Sampled Metric Counter
+ 17 Register */
+#define XAPM_SMC18_OFFSET 0x0660 /* Sampled Metric Counter
+ 18 Register */
+#define XAPM_SMC19_OFFSET 0x0670 /* Sampled Metric Counter
+ 19 Register */
+#define XAPM_SMC20_OFFSET 0x0680 /* Sampled Metric Counter
+ 20 Register */
+#define XAPM_SMC21_OFFSET 0x0690 /* Sampled Metric Counter
+ 21 Register */
+#define XAPM_SMC22_OFFSET 0x06A0 /* Sampled Metric Counter
+ 22 Register */
+#define XAPM_SMC23_OFFSET 0x06B0 /* Sampled Metric Counter
+ 23 Register */
+#define XAPM_SMC24_OFFSET 0x0800 /* Sampled Metric Counter
+ 24 Register */
+#define XAPM_SMC25_OFFSET 0x0810 /* Sampled Metric Counter
+ 25 Register */
+#define XAPM_SMC26_OFFSET 0x0820 /* Sampled Metric Counter
+ 26 Register */
+#define XAPM_SMC27_OFFSET 0x0830 /* Sampled Metric Counter
+ 27 Register */
+#define XAPM_SMC28_OFFSET 0x0840 /* Sampled Metric Counter
+ 28 Register */
+#define XAPM_SMC29_OFFSET 0x0850 /* Sampled Metric Counter
+ 29 Register */
+#define XAPM_SMC30_OFFSET 0x0860 /* Sampled Metric Counter
+ 30 Register */
+#define XAPM_SMC31_OFFSET 0x0870 /* Sampled Metric Counter
+ 31 Register */
+#define XAPM_SMC32_OFFSET 0x0880 /* Sampled Metric Counter
+ 32 Register */
+#define XAPM_SMC33_OFFSET 0x0890 /* Sampled Metric Counter
+ 33 Register */
+#define XAPM_SMC34_OFFSET 0x08A0 /* Sampled Metric Counter
+ 34 Register */
+#define XAPM_SMC35_OFFSET 0x08B0 /* Sampled Metric Counter
+ 35 Register */
+#define XAPM_SMC36_OFFSET 0x0A00 /* Sampled Metric Counter
+ 36 Register */
+#define XAPM_SMC37_OFFSET 0x0A10 /* Sampled Metric Counter
+ 37 Register */
+#define XAPM_SMC38_OFFSET 0x0A20 /* Sampled Metric Counter
+ 38 Register */
+#define XAPM_SMC39_OFFSET 0x0A30 /* Sampled Metric Counter
+ 39 Register */
+#define XAPM_SMC40_OFFSET 0x0A40 /* Sampled Metric Counter
+ 40 Register */
+#define XAPM_SMC41_OFFSET 0x0A50 /* Sampled Metric Counter
+ 41 Register */
+#define XAPM_SMC42_OFFSET 0x0A60 /* Sampled Metric Counter
+ 42 Register */
+#define XAPM_SMC43_OFFSET 0x0A70 /* Sampled Metric Counter
+ 43 Register */
+#define XAPM_SMC44_OFFSET 0x0A80 /* Sampled Metric Counter
+ 44 Register */
+#define XAPM_SMC45_OFFSET 0x0A90 /* Sampled Metric Counter
+ 45 Register */
+#define XAPM_SMC46_OFFSET 0x0AA0 /* Sampled Metric Counter
+ 46 Register */
+#define XAPM_SMC47_OFFSET 0x0AB0 /* Sampled Metric Counter
+ 47 Register */
+
+#define XAPM_CTL_OFFSET 0x0300 /* Control Register */
+
+#define XAPM_ID_OFFSET 0x0304 /* Latency ID Register */
+
+#define XAPM_IDMASK_OFFSET 0x0308 /* ID Mask Register */
+
+#define XAPM_RID_OFFSET 0x030C /* Latency Read ID Register */
+
+#define XAPM_RIDMASK_OFFSET 0x0310 /* Read ID mask register */
+
+#define XAPM_FEC_OFFSET 0x0400 /* Flag Enable
+ Control Register */
+
+#define XAPM_SWD_OFFSET 0x0404 /* Software-written
+ Data Register */
+
+#define XAPM_SICR_MCNTR_RST_MASK 0x00000100 /* Enable the Metric
+ Counter Reset */
+#define XAPM_SICR_LOAD_MASK 0x00000002 /* Load the Sample Interval
+ Register Value into
+ the counter */
+#define XAPM_SICR_ENABLE_MASK 0x00000001 /* Enable the downcounter */
+
+#define XAPM_IXR_MC9_OVERFLOW_MASK 0x00001000 /**< Metric Counter 9
+ * Overflow */
+#define XAPM_IXR_MC8_OVERFLOW_MASK 0x00000800 /**< Metric Counter 8
+ * Overflow */
+#define XAPM_IXR_MC7_OVERFLOW_MASK 0x00000400 /**< Metric Counter 7
+ * Overflow */
+#define XAPM_IXR_MC6_OVERFLOW_MASK 0x00000200 /**< Metric Counter 6
+ * Overflow */
+#define XAPM_IXR_MC5_OVERFLOW_MASK 0x00000100 /**< Metric Counter 5
+ * Overflow */
+#define XAPM_IXR_MC4_OVERFLOW_MASK 0x00000080 /**< Metric Counter 4
+ * Overflow */
+#define XAPM_IXR_MC3_OVERFLOW_MASK 0x00000040 /**< Metric Counter 3
+ * Overflow */
+#define XAPM_IXR_MC2_OVERFLOW_MASK 0x00000020 /**< Metric Counter 2
+ * Overflow */
+#define XAPM_IXR_MC1_OVERFLOW_MASK 0x00000010 /**< Metric Counter 1
+ * Overflow */
+#define XAPM_IXR_MC0_OVERFLOW_MASK 0x00000008 /**< Metric Counter 0
+ * Overflow */
+#define XAPM_IXR_FIFO_FULL_MASK 0x00000004 /**< Event Log FIFO
+ * full */
+#define XAPM_IXR_SIC_OVERFLOW_MASK 0x00000002 /**< Sample Interval
+ * Counter Overflow */
+#define XAPM_IXR_GCC_OVERFLOW_MASK 0x00000001 /**< Global Clock
+ * Counter Overflow */
+#define XAPM_IXR_ALL_MASK (XAPM_IXR_SIC_OVERFLOW_MASK | \
+ XAPM_IXR_GCC_OVERFLOW_MASK | \
+ XAPM_IXR_FIFO_FULL_MASK | \
+ XAPM_IXR_MC0_OVERFLOW_MASK | \
+ XAPM_IXR_MC1_OVERFLOW_MASK | \
+ XAPM_IXR_MC2_OVERFLOW_MASK | \
+ XAPM_IXR_MC3_OVERFLOW_MASK | \
+ XAPM_IXR_MC4_OVERFLOW_MASK | \
+ XAPM_IXR_MC5_OVERFLOW_MASK | \
+ XAPM_IXR_MC6_OVERFLOW_MASK | \
+ XAPM_IXR_MC7_OVERFLOW_MASK | \
+ XAPM_IXR_MC8_OVERFLOW_MASK | \
+ XAPM_IXR_MC9_OVERFLOW_MASK)
+
+#define XAPM_CR_FIFO_RESET_MASK 0x02000000
+ /**< FIFO Reset */
+#define XAPM_CR_MUXSEL_MASK 0x01000000
+ /**< Mux Selector mask */
+#define XAPM_CR_GCC_RESET_MASK 0x00020000
+ /**< Global Clk
+ Counter Reset */
+#define XAPM_CR_GCC_ENABLE_MASK 0x00010000
+ /**< Global Clk
+ Counter Enable */
+#define XAPM_CR_EVTLOG_EXTTRIGGER_MASK 0x00000200
+ /**< Enable External trigger
+ to start event Log */
+#define XAPM_CR_EVENTLOG_ENABLE_MASK 0x00000100
+ /**< Event Log Enable */
+#define XAPM_CR_RDLATENCY_END_MASK 0x00000080
+ /**< Read Latency
+ End point */
+#define XAPM_CR_RDLATENCY_START_MASK 0x00000040
+ /**< Read Latency
+ Start point */
+#define XAPM_CR_WRLATENCY_END_MASK 0x00000020
+ /**< Write Latency
+ End point */
+#define XAPM_CR_WRLATENCY_START_MASK 0x00000010
+ /**< Write Latency
+ Start point */
+#define XAPM_CR_IDFILTER_ENABLE_MASK 0x00000008
+ /**< ID Filter Enable */
+#define XAPM_CR_MCNTR_EXTTRIGGER_MASK 0x00000004
+ /**< Enable External
+ trigger to start
+ Metric Counters */
+#define XAPM_CR_MCNTR_RESET_MASK 0x00000002
+ /**< Metrics Counter
+ Reset */
+#define XAPM_CR_MCNTR_ENABLE_MASK 0x00000001
+ /**< Metrics Counter
+ Enable */
+
+#define XAPM_ID_RID_MASK 0xFFFF0000 /**< Read ID */
+
+#define XAPM_ID_WID_MASK 0x0000FFFF /**< Write ID */
+
+#define XAPM_MASKID_RID_MASK 0xFFFF0000 /**< Read ID Mask */
+
+#define XAPM_MASKID_WID_MASK 0x0000FFFF /**< Write ID Mask*/
+
+
+#define XAPM_MAX_COUNTERS 10 /**< Maximum number of Counters */
+#define XAPM_MAX_COUNTERS_PROFILE 48 /**< Maximum number of Counters in
+ profile mode */
+
+#define XAPM_METRIC_COUNTER_0 0 /**< Metric Counter 0 Register Index */
+#define XAPM_METRIC_COUNTER_1 1 /**< Metric Counter 1 Register Index */
+#define XAPM_METRIC_COUNTER_2 2 /**< Metric Counter 2 Register Index */
+#define XAPM_METRIC_COUNTER_3 3 /**< Metric Counter 3 Register Index */
+#define XAPM_METRIC_COUNTER_4 4 /**< Metric Counter 4 Register Index */
+#define XAPM_METRIC_COUNTER_5 5 /**< Metric Counter 5 Register Index */
+#define XAPM_METRIC_COUNTER_6 6 /**< Metric Counter 6 Register Index */
+#define XAPM_METRIC_COUNTER_7 7 /**< Metric Counter 7 Register Index */
+#define XAPM_METRIC_COUNTER_8 8 /**< Metric Counter 8 Register Index */
+#define XAPM_METRIC_COUNTER_9 9 /**< Metric Counter 9 Register Index */
+
+#define XAPM_INCREMENTER_0 0 /**< Incrementer 0 Register Index */
+#define XAPM_INCREMENTER_1 1 /**< Incrementer 1 Register Index */
+#define XAPM_INCREMENTER_2 2 /**< Incrementer 2 Register Index */
+#define XAPM_INCREMENTER_3 3 /**< Incrementer 3 Register Index */
+#define XAPM_INCREMENTER_4 4 /**< Incrementer 4 Register Index */
+#define XAPM_INCREMENTER_5 5 /**< Incrementer 5 Register Index */
+#define XAPM_INCREMENTER_6 6 /**< Incrementer 6 Register Index */
+#define XAPM_INCREMENTER_7 7 /**< Incrementer 7 Register Index */
+#define XAPM_INCREMENTER_8 8 /**< Incrementer 8 Register Index */
+#define XAPM_INCREMENTER_9 9 /**< Incrementer 9 Register Index */
+
+#define XAPM_METRIC_SET_0 0 /**< Write Transaction Count */
+#define XAPM_METRIC_SET_1 1 /**< Read Transaction Count */
+#define XAPM_METRIC_SET_2 2 /**< Write Byte Count */
+#define XAPM_METRIC_SET_3 3 /**< Read Byte Count */
+#define XAPM_METRIC_SET_4 4 /**< Write Beat Count */
+#define XAPM_METRIC_SET_5 5 /**< Total Read Latency */
+#define XAPM_METRIC_SET_6 6 /**< Total Write Latency */
+#define XAPM_METRIC_SET_7 7 /**< Slv_Wr_Idle_Cnt */
+#define XAPM_METRIC_SET_8 8 /**< Mst_Rd_Idle_Cnt */
+#define XAPM_METRIC_SET_9 9 /**< Num_BValids */
+#define XAPM_METRIC_SET_10 10 /**< Num_WLasts */
+#define XAPM_METRIC_SET_11 11 /**< Num_RLasts */
+#define XAPM_METRIC_SET_12 12 /**< Minimum Write Latency */
+#define XAPM_METRIC_SET_13 13 /**< Maximum Write Latency */
+#define XAPM_METRIC_SET_14 14 /**< Minimum Read Latency */
+#define XAPM_METRIC_SET_15 15 /**< Maximum Read Latency */
+#define XAPM_METRIC_SET_16 16 /**< Transfer Cycle Count */
+#define XAPM_METRIC_SET_17 17 /**< Packet Count */
+#define XAPM_METRIC_SET_18 18 /**< Data Byte Count */
+#define XAPM_METRIC_SET_19 19 /**< Position Byte Count */
+#define XAPM_METRIC_SET_20 20 /**< Null Byte Count */
+#define XAPM_METRIC_SET_21 21 /**< Slv_Idle_Cnt */
+#define XAPM_METRIC_SET_22 22 /**< Mst_Idle_Cnt */
+#define XAPM_METRIC_SET_30 30 /**< External event count */
+
+#define XAPM_MAX_AGENTS 8 /**< Maximum number of Agents */
+
+#define XAPM_FLAG_WRADDR 0x00000001 /**< Write Address flag */
+#define XAPM_FLAG_FIRSTWR 0x00000002 /**< First Write flag */
+#define XAPM_FLAG_LASTWR 0x00000004 /**< Last Write flag */
+#define XAPM_FLAG_RESPONSE 0x00000008 /**< Response flag */
+#define XAPM_FLAG_RDADDR 0x00000010 /**< Read Address flag */
+#define XAPM_FLAG_FIRSTRD 0x00000020 /**< First Read flag */
+#define XAPM_FLAG_LASTRD 0x00000040 /**< Last Read flag */
+#define XAPM_FLAG_SWDATA 0x00010000 /**< Software-written Data flag */
+#define XAPM_FLAG_EVENT 0x00020000 /**< Event flag */
+#define XAPM_FLAG_EVNTSTOP 0x00040000 /**< Event Stop flag */
+#define XAPM_FLAG_EVNTSTART 0x00080000 /**< Event Start flag */
+#define XAPM_FLAG_GCCOVF 0x00100000 /**< Global Clock Counter Overflow
+ * flag */
+#define XAPM_FLAG_SCLAPSE 0x00200000 /**< Sample Counter Lapse flag */
+#define XAPM_FLAG_MC0 0x00400000 /**< Metric Counter 0 flag */
+#define XAPM_FLAG_MC1 0x00800000 /**< Metric Counter 1 flag */
+#define XAPM_FLAG_MC2 0x01000000 /**< Metric Counter 2 flag */
+#define XAPM_FLAG_MC3 0x02000000 /**< Metric Counter 3 flag */
+#define XAPM_FLAG_MC4 0x04000000 /**< Metric Counter 4 flag */
+#define XAPM_FLAG_MC5 0x08000000 /**< Metric Counter 5 flag */
+#define XAPM_FLAG_MC6 0x10000000 /**< Metric Counter 6 flag */
+#define XAPM_FLAG_MC7 0x20000000 /**< Metric Counter 7 flag */
+#define XAPM_FLAG_MC8 0x40000000 /**< Metric Counter 8 flag */
+#define XAPM_FLAG_MC9 0x80000000 /**< Metric Counter 9 flag */
+
+#define XAPM_LATENCY_ADDR_ISSUE 0 /**< Address Issue as start
+ point for Latency calculation*/
+#define XAPM_LATENCY_ADDR_ACCEPT 1 /**< Address Acceptance as start
+ point for Latency calculation*/
+#define XAPM_LATENCY_LASTRD 0 /**< Last Read as end point for
+ Latency calculation */
+#define XAPM_LATENCY_LASTWR 0 /**< Last Write as end point for
+ Latency calculation */
+#define XAPM_LATENCY_FIRSTRD 1 /**< First Read as end point for
+ Latency calculation */
+#define XAPM_LATENCY_FIRSTWR 1 /**< First Write as end point for
+ Latency calculation */
+
+#define XAPM_MODE_TRACE 2 /**< APM in Trace mode */
+
+#define XAPM_MODE_PROFILE 1 /**< APM in Profile mode */
+
+#define XAPM_MODE_ADVANCED 0 /**< APM in Advanced mode */
+
+typedef unsigned char u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef unsigned long ulong;
+
+ulong baseaddr;
+
+struct xapm_param {
+ u32 mode;
+ u32 maxslots;
+ u32 eventcnt;
+ u32 eventlog;
+ u32 sampledcnt;
+ u32 numcounters;
+ u32 metricwidth;
+ u32 sampledwidth;
+ u32 globalcntwidth;
+ u32 scalefactor;
+ u32 isr;
+ bool is_32bit_filter;
+};
+
+static struct xapm_param *params;
+
+/*****************************************************************************/
+/**
+*
+* Read a register of the AXI Performance Monitor device. This macro provides
+* register access to all registers using the register offsets defined above.
+*
+* @param baseaddr contains the base address of the device.
+* @param regoffset is the offset of the register to read.
+*
+* @return The contents of the register.
+*
+* @note C-style Signature:
+* u32 readreg(u32 baseaddr, u32 regoffset);
+*
+******************************************************************************/
+#define readreg(baseaddr, regoffset) \
+ (*(u32 *)(baseaddr + regoffset))
+
+/*****************************************************************************/
+/**
+*
+* Write a register of the AXI Performance Monitor device. This macro provides
+* register access to all registers using the register offsets defined above.
+*
+* @param baseaddr contains the base address of the device.
+* @param regoffset is the offset of the register to write.
+* @param data is the value to write to the register.
+*
+* @return None.
+*
+* @note C-style Signature:
+* void writereg(u32 baseaddr,
+* u32 regoffset, u32 data)
+*
+******************************************************************************/
+#define writereg(baseaddr, regoffset, data) \
+ (*(u32 *)(baseaddr + regoffset) = data)
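+
+/*****************************************************************************/
+/**
+*
+* Usage sketch (illustrative only): a typical read-modify-write of the
+* Control Register built from the two accessors above, here enabling the
+* metric counters.
+*
+*	u32 val = readreg(baseaddr, XAPM_CTL_OFFSET);
+*	writereg(baseaddr, XAPM_CTL_OFFSET, val | XAPM_CR_MCNTR_ENABLE_MASK);
+*
+******************************************************************************/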
+
+/****************************************************************************/
+/**
+*
+* This routine enables the Global Interrupt.
+*
+* @note C-Style signature:
+* void intrglobalenable(void)
+*
+*****************************************************************************/
+#define intrglobalenable() \
+ writereg(baseaddr, XAPM_GIE_OFFSET, 1)
+
+
+/****************************************************************************/
+/**
+*
+* This routine disables the Global Interrupt.
+*
+* @note C-Style signature:
+* void intrglobaldisable(void)
+*
+*****************************************************************************/
+#define intrglobaldisable() \
+ writereg(baseaddr, XAPM_GIE_OFFSET, 0)
+
+/****************************************************************************/
+/**
+*
+* This routine enables interrupt(s). Use the XAPM_IXR_* constants defined in
+* xaxipmon.h to create the bit-mask to enable interrupts.
+*
+* @param mask is the mask to enable. Bit positions of 1 will be enabled.
+* Bit positions of 0 will keep the previous setting. This mask is
+* formed by OR'ing XAPM_IXR_* bits defined in xaxipmon.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrenable(u32 mask)
+*
+*****************************************************************************/
+#define intrenable(mask) \
+ writereg(baseaddr, XAPM_IE_OFFSET, readreg(baseaddr, \
+ XAPM_IE_OFFSET) | mask);
+
+
+/****************************************************************************/
+/**
+*
+* This routine disables interrupt(s). Use the XAPM_IXR_* constants defined
+* in xaxipmon.h to create the bit-mask to disable interrupts.
+*
+* @param mask is the mask to disable. Bit positions of 1 will be
+* disabled. Bit positions of 0 will keep the previous setting.
+* This mask is formed by OR'ing XAPM_IXR_* bits defined in
+* xaxipmon.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrdisable(u32 mask)
+*
+*****************************************************************************/
+#define intrdisable(mask) \
+ writereg(baseaddr, XAPM_IE_OFFSET, readreg(baseaddr, \
+ XAPM_IE_OFFSET) & ~(mask));
+
+/****************************************************************************/
+/**
+*
+* This routine clears the specified interrupt(s).
+*
+* @param mask is the mask to clear. Bit positions of 1 will be cleared.
+* This mask is formed by OR'ing XAPM_IXR_* bits defined in
+* xaxipmon.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrclear(u32 mask)
+*
+*****************************************************************************/
+#define intrclear(mask) \
+ writereg(baseaddr, XAPM_IS_OFFSET, readreg(baseaddr, \
+ XAPM_IS_OFFSET) | mask);
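+
+/****************************************************************************/
+/**
+*
+* Usage sketch (illustrative only): clear any stale status, then arm all
+* interrupt sources before starting the counters.
+*
+*	intrclear(XAPM_IXR_ALL_MASK);
+*	intrenable(XAPM_IXR_ALL_MASK);
+*	intrglobalenable();
+*
+*****************************************************************************/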
+
+/****************************************************************************/
+/**
+*
+* This routine returns the Interrupt Status Register.
+*
+* @return isr value updated by kernel driver
+*
+* @note This macro returns isr value updated by kernel driver.
+* C-Style signature:
+* u32 intrgetstatus(void)
+*
+*****************************************************************************/
+#define intrgetstatus() (params->isr)
+
+/****************************************************************************/
+/**
+*
+* This routine returns the Interrupt Status Register.
+*
+* @return Interrupt Status Register contents
+*
+* @note C-Style signature:
+* u32 intrhwgetstatus(void)
+*
+*****************************************************************************/
+#define intrhwgetstatus() (params->isr)
+
+/****************************************************************************/
+/**
+*
+* This function enables the Global Clock Counter.
+*
+* @note C-Style signature:
+* void enablegcc(void);
+*
+*****************************************************************************/
+#define enablegcc() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) | XAPM_CR_GCC_ENABLE_MASK);
+
+/****************************************************************************/
+/**
+*
+* This function disables the Global Clock Counter.
+*
+* @note C-Style signature:
+* void disablegcc(void);
+*
+*****************************************************************************/
+#define disablegcc() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_GCC_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function enables the specified flag in flag Control Register.
+*
+* @param flag is one of the XAPM_FLAG_* masks defined in xaxipmon.h
+*
+* @return None
+*
+* @note C-Style signature:
+* void enableflag(u32 flag);
+*
+*****************************************************************************/
+#define enableflag(flag) \
+ writereg(baseaddr, XAPM_FEC_OFFSET, \
+ readreg(baseaddr, XAPM_FEC_OFFSET) | flag);
+
+/****************************************************************************/
+/**
+*
+* This function disables the specified flag in flag Control Register.
+*
+* @param flag is one of the XAPM_FLAG_* masks defined in xaxipmon.h
+*
+* @return None
+*
+* @note C-Style signature:
+* void disableflag(u32 flag);
+*
+*****************************************************************************/
+#define disableflag(flag) \
+ writereg(baseaddr, XAPM_FEC_OFFSET, \
+ readreg(baseaddr, XAPM_FEC_OFFSET) & ~(flag));
+
+/****************************************************************************/
+/**
+*
+* This function loads the sample interval register value into the sample
+* interval counter.
+*
+* @note C-Style signature:
+* void loadsic(void);
+*
+*****************************************************************************/
+#define loadsic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_LOAD_MASK)
+
+
+/****************************************************************************/
+/**
+*
+* This enables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void enablesic(void);
+*
+*****************************************************************************/
+#define enablesic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_ENABLE_MASK)
+
+/****************************************************************************/
+/**
+*
+* This disables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void disablesic(void);
+*
+*****************************************************************************/
+#define disablesic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, \
+ readreg(baseaddr, XAPM_SICR_OFFSET) & ~(XAPM_SICR_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This enables Reset of Metric Counters when Sample Interval Counter lapses.
+*
+* @note C-Style signature:
+* void enablemcreset(void);
+*
+*****************************************************************************/
+#define enablemcreset() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_MCNTR_RST_MASK);
+
+/****************************************************************************/
+/**
+*
+* This disables Reset of Metric Counters when Sample Interval Counter lapses.
+*
+* @note C-Style signature:
+* void disablemcreset(void);
+*
+*****************************************************************************/
+#define disablemcreset() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, \
+ readreg(baseaddr, XAPM_SICR_OFFSET) & ~(XAPM_SICR_MCNTR_RST_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function enables the ID Filter Masking.
+*
+* @note C-Style signature:
+* void enableidfilter(void);
+*
+*****************************************************************************/
+#define enableidfilter() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) | XAPM_CR_IDFILTER_ENABLE_MASK);
+
+/****************************************************************************/
+/**
+*
+* This function disables the ID Filter masking.
+*
+* @note C-Style signature:
+* void disableidfilter(void);
+*
+*****************************************************************************/
+#define disableidfilter() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_IDFILTER_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function samples the Metric Counters into the Sampled Metric
+* Counters by reading the Sample Register, and returns the interval,
+* i.e. the number of clocks between the previous read and the current
+* read of the Sample Register.
+*
+* @return Interval, i.e. the number of clocks between the previous
+* read and the current read of the Sample Register.
+*
+* @note C-Style signature:
+* u32 samplemetrics(void);
+*
+*****************************************************************************/
+#define samplemetrics() readreg(baseaddr, XAPM_SR_OFFSET)
+
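+/****************************************************************************/
+/**
+*
+* Usage sketch (illustrative only): take a sample and read back the first
+* sampled metric counter. Pairing samplemetrics() with XAPM_SMC0_OFFSET is
+* an example; any sampled counter offset can be used.
+*
+*	u32 interval = samplemetrics();
+*	u32 count = readreg(baseaddr, XAPM_SMC0_OFFSET);
+*
+*****************************************************************************/
+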
+
+/************************** Function Prototypes *****************************/
+
+int resetmetriccounter(void);
+
+void resetglobalclkcounter(void);
+
+int resetfifo(void);
+
+void setincrementerrange(u8 incrementer, u16 rangehigh, u16 rangelow);
+
+void getincrementerrange(u8 incrementer, u16 *rangehigh, u16 *rangelow);
+
+void setsampleinterval(u32 sampleinterval);
+
+void getsampleinterval(u32 *sampleinterval);
+
+int setmetrics(u8 slot, u8 metrics, u8 counter);
+
+int getmetrics(u8 counter, u8 *metrics, u8 *slot);
+void getglobalclkcounter(u32 *cnthigh, u32 *cntlow);
+
+u32 getmetriccounter(u32 counter);
+
+u32 getsampledmetriccounter(u32 counter);
+
+u32 getincrementer(u32 incrementer);
+
+u32 getsampledincrementer(u32 incrementer);
+
+void setswdatareg(u32 swdata);
+
+u32 getswdatareg(void);
+
+int starteventlog(u32 flagenables);
+
+int stopeventlog(void);
+
+int startcounters(u32 sampleinterval);
+
+int stopcounters(void);
+
+void enablemetricscounter(void);
+
+void disablemetricscounter(void);
+
+void setlogenableranges(u32 counter, u16 rangehigh, u16 rangelow);
+
+void getlogenableranges(u32 counter, u16 *rangehigh, u16 *rangelow);
+
+void enableeventlog(void);
+
+void enablemctrigger(void);
+
+void disablemctrigger(void);
+
+void enableeventlogtrigger(void);
+
+void disableeventlogtrigger(void);
+
+const char *getmetricname(u8 metrics);
+
+void setwriteid(u32 writeid);
+
+void setreadid(u32 readid);
+
+u32 getwriteid(void);
+
+u32 getreadid(void);
+
+void setwrlatencystart(u8 param);
+
+void setwrlatencyend(u8 param);
+
+void setrdlatencystart(u8 param);
+
+void setrdlatencyend(u8 param);
+
+u8 getwrlatencystart(void);
+
+u8 getwrlatencyend(void);
+
+u8 getrdlatencystart(void);
+
+u8 getrdlatencyend(void);
+
+void setwriteidmask(u32 wrmask);
+
+void setreadidmask(u32 rdmask);
+
+u32 getwriteidmask(void);
+
+u32 getreadidmask(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* End of protection macro. */
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 69973179ef15..3660a42f7673 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -1,4 +1,19 @@
# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_XILINX_DP
+ tristate "Audio support for the the Xilinx DisplayPort"
+ select SND_DMAENGINE_PCM
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Audio support for the Xilinx DisplayPort.
+
+config SND_SOC_XILINX_SDI
+ tristate "Audio support for the the Xilinx SDI"
+ depends on DRM_XLNX_SDI
+ depends on VIDEO_XILINX_SDIRXSS
+ help
+ Select this option to enable Xilinx SDI Audio. This enables
+ SDI audio playback and capture using Xilinx soft IP.
+
config SND_SOC_XILINX_I2S
tristate "Audio support for the Xilinx I2S"
help
@@ -7,6 +22,26 @@ config SND_SOC_XILINX_I2S
mode, IP receives audio in AES format, extracts PCM and sends
PCM data. In receiver mode, IP receives PCM audio and
encapsulates PCM in AES format and sends AES data.
+ I2S playback and capture using Xilinx soft IP.
+
+config SND_SOC_XILINX_SPDIF
+ tristate "Audio support for the the Xilinx SPDIF"
+ help
+ Select this option to enable Xilinx SPDIF Audio.
+ Enabling this provides one of the components required in an
+ ASoC audio pipeline.
+ This supports playback and capture use cases.
+
+config SND_SOC_XILINX_PL_SND_CARD
+ tristate "Audio support for the the Xilinx PL sound card"
+ depends on SND_SOC_XILINX_AUDIO_FORMATTER
+ depends on SND_SOC_XILINX_I2S
+ depends on SND_SOC_XILINX_SDI
+ select SND_SOC_HDMI_CODEC
+ help
+ Select this option to enable Xilinx PL sound card
+ support. This enables a sound card using Xilinx soft IPs
+ in the audio pipeline.
+
config SND_SOC_XILINX_AUDIO_FORMATTER
tristate "Audio support for the the Xilinx audio formatter"
diff --git a/sound/soc/xilinx/Makefile b/sound/soc/xilinx/Makefile
index be7652ce7c13..aea78bae7e86 100644
--- a/sound/soc/xilinx/Makefile
+++ b/sound/soc/xilinx/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-pcm.o
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-codec.o
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-card.o
+obj-$(CONFIG_SND_SOC_XILINX_SDI) += xlnx_sdi_audio.o
snd-soc-xlnx-i2s-objs := xlnx_i2s.o
obj-$(CONFIG_SND_SOC_XILINX_I2S) += snd-soc-xlnx-i2s.o
+obj-$(CONFIG_SND_SOC_XILINX_PL_SND_CARD) += xlnx_pl_snd_card.o
snd-soc-xlnx-formatter-pcm-objs := xlnx_formatter_pcm.o
obj-$(CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER) += snd-soc-xlnx-formatter-pcm.o
snd-soc-xlnx-spdif-objs := xlnx_spdif.o
diff --git a/sound/soc/xilinx/xilinx-dp-card.c b/sound/soc/xilinx/xilinx-dp-card.c
new file mode 100644
index 000000000000..a149da095df8
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-card.c
@@ -0,0 +1,113 @@
+/*
+ * Xilinx DisplayPort SoC Sound Card support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+static int xilinx_dp_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 256);
+ return 0;
+}
+
+static const struct snd_soc_ops xilinx_dp_ops = {
+ .startup = xilinx_dp_startup,
+};
+
+static struct snd_soc_dai_link xilinx_dp_dai_links[] = {
+ {
+ .name = "xilinx-dp0",
+ .stream_name = "xilinx-dp0",
+ .codec_dai_name = "xilinx-dp-snd-codec-dai",
+ .ops = &xilinx_dp_ops,
+ },
+ {
+ .name = "xilinx-dp1",
+ .stream_name = "xilinx-dp1",
+ .codec_dai_name = "xilinx-dp-snd-codec-dai",
+ .ops = &xilinx_dp_ops,
+ },
+};
+
+static struct snd_soc_card xilinx_dp_card = {
+ .name = "DisplayPort monitor",
+ .owner = THIS_MODULE,
+ .dai_link = xilinx_dp_dai_links,
+ .num_links = 2,
+};
+
+static int xilinx_dp_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &xilinx_dp_card;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *codec, *pcm;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ codec = of_parse_phandle(node, "xlnx,dp-snd-codec", 0);
+ if (!codec)
+ return -ENODEV;
+
+ pcm = of_parse_phandle(node, "xlnx,dp-snd-pcm", 0);
+ if (!pcm)
+ return -ENODEV;
+ xilinx_dp_dai_links[0].platform_of_node = pcm;
+ xilinx_dp_dai_links[0].cpu_of_node = codec;
+ xilinx_dp_dai_links[0].codec_of_node = codec;
+
+ pcm = of_parse_phandle(node, "xlnx,dp-snd-pcm", 1);
+ if (!pcm)
+ return -ENODEV;
+ xilinx_dp_dai_links[1].platform_of_node = pcm;
+ xilinx_dp_dai_links[1].cpu_of_node = codec;
+ xilinx_dp_dai_links[1].codec_of_node = codec;
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound Card probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dp_of_match[] = {
+ { .compatible = "xlnx,dp-snd-card", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_of_match);
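+
+/*
+ * Example device tree snippet (illustrative; the node and phandle names are
+ * hypothetical, while the compatible string and the xlnx,dp-snd-* properties
+ * match what the probe above parses):
+ *
+ *	dp_snd_card {
+ *		compatible = "xlnx,dp-snd-card";
+ *		xlnx,dp-snd-codec = <&dp_snd_codec0>;
+ *		xlnx,dp-snd-pcm = <&dp_snd_pcm0>, <&dp_snd_pcm1>;
+ *	};
+ */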
+
+static struct platform_driver xilinx_dp_aud_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-card",
+ .of_match_table = xilinx_dp_of_match,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = xilinx_dp_probe,
+};
+module_platform_driver(xilinx_dp_aud_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound Card module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xilinx-dp-codec.c b/sound/soc/xilinx/xilinx-dp-codec.c
new file mode 100644
index 000000000000..af6e6b08c415
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-codec.c
@@ -0,0 +1,178 @@
+/*
+ * Xilinx DisplayPort Sound Codec support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+/**
+ * struct xilinx_dp_codec - DisplayPort codec
+ * @aud_clk: audio clock
+ */
+struct xilinx_dp_codec {
+ struct clk *aud_clk;
+};
+
+struct xilinx_dp_codec_fmt {
+ unsigned long rate;
+ unsigned int snd_rate;
+};
+
+static struct snd_soc_dai_driver xilinx_dp_codec_dai = {
+ .name = "xilinx-dp-snd-codec-dai",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static const struct xilinx_dp_codec_fmt rates[] = {
+ {
+ .rate = 48000 * 512,
+ .snd_rate = SNDRV_PCM_RATE_48000
+ },
+ {
+ .rate = 44100 * 512,
+ .snd_rate = SNDRV_PCM_RATE_44100
+ }
+};
+
+static const struct snd_soc_component_driver xilinx_dp_component_driver = {
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static int xilinx_dp_codec_probe(struct platform_device *pdev)
+{
+ struct xilinx_dp_codec *codec;
+ unsigned int i;
+ unsigned long rate;
+ int ret;
+
+ codec = devm_kzalloc(&pdev->dev, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ codec->aud_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(codec->aud_clk))
+ return PTR_ERR(codec->aud_clk);
+
+ ret = clk_prepare_enable(codec->aud_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable the aud_clk\n");
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++) {
+ clk_disable_unprepare(codec->aud_clk);
+ ret = clk_set_rate(codec->aud_clk, rates[i].rate);
+ clk_prepare_enable(codec->aud_clk);
+ if (ret)
+ continue;
+
+ rate = clk_get_rate(codec->aud_clk);
+ /* Tolerate a small deviation of +/- 10 Hz from the target rate */
+ if (abs(rates[i].rate - rate) < 10) {
+ xilinx_dp_codec_dai.playback.rates = rates[i].snd_rate;
+ break;
+ }
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get required clock freq\n");
+ goto error_clk;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &xilinx_dp_component_driver,
+ &xilinx_dp_codec_dai, 1);
+ if (ret)
+ goto error_clk;
+
+ platform_set_drvdata(pdev, codec);
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound Codec probed\n");
+
+ return 0;
+
+error_clk:
+ clk_disable_unprepare(codec->aud_clk);
+ return ret;
+}
+
+static int xilinx_dp_codec_dev_remove(struct platform_device *pdev)
+{
+ struct xilinx_dp_codec *codec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(codec->aud_clk);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_dp_codec_pm_suspend(struct device *dev)
+{
+ struct xilinx_dp_codec *codec = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(codec->aud_clk);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_dp_codec_pm_resume(struct device *dev)
+{
+ struct xilinx_dp_codec *codec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(codec->aud_clk);
+ if (ret)
+ dev_err(dev, "failed to enable the aud_clk\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops xilinx_dp_codec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_dp_codec_pm_suspend,
+ xilinx_dp_codec_pm_resume)
+};
+
+static const struct of_device_id xilinx_dp_codec_of_match[] = {
+ { .compatible = "xlnx,dp-snd-codec", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_codec_of_match);
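+
+/*
+ * Example device tree snippet (illustrative; the node and clock names are
+ * hypothetical, the compatible string matches the table above, and the
+ * single unnamed clock corresponds to the devm_clk_get(dev, NULL) call in
+ * probe):
+ *
+ *	dp_snd_codec0: dp-snd-codec {
+ *		compatible = "xlnx,dp-snd-codec";
+ *		clocks = <&dp_aud_clk>;
+ *	};
+ */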
+
+static struct platform_driver xilinx_dp_codec_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-codec",
+ .of_match_table = xilinx_dp_codec_of_match,
+ .pm = &xilinx_dp_codec_pm_ops,
+ },
+ .probe = xilinx_dp_codec_probe,
+ .remove = xilinx_dp_codec_dev_remove,
+};
+module_platform_driver(xilinx_dp_codec_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound Codec module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xilinx-dp-pcm.c b/sound/soc/xilinx/xilinx-dp-pcm.c
new file mode 100644
index 000000000000..fa8abe788cf7
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-pcm.c
@@ -0,0 +1,76 @@
+/*
+ * Xilinx DisplayPort Sound PCM support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+static const struct snd_pcm_hardware xilinx_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 256,
+ .period_bytes_max = 1024 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+};
+
+static const struct snd_dmaengine_pcm_config xilinx_dmaengine_pcm_config = {
+ .pcm_hardware = &xilinx_pcm_hw,
+ .prealloc_buffer_size = 64 * 1024,
+};
+
+static int xilinx_dp_pcm_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dev_set_name(&pdev->dev, "%s", pdev->dev.of_node->name);
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
+ &xilinx_dmaengine_pcm_config, 0);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound PCM probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dp_pcm_of_match[] = {
+ { .compatible = "xlnx,dp-snd-pcm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_pcm_of_match);
+
+static struct platform_driver xilinx_dp_pcm_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-pcm",
+ .of_match_table = xilinx_dp_pcm_of_match,
+ },
+ .probe = xilinx_dp_pcm_probe,
+};
+module_platform_driver(xilinx_dp_pcm_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound PCM module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index dc8721f4f56b..8215b9a4438c 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -17,6 +17,8 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
+#include "xlnx_snd_common.h"
+
#define DRV_NAME "xlnx_formatter_pcm"
#define XLNX_S2MM_OFFSET 0
@@ -56,7 +58,9 @@
#define CFG_S2MM_XFER_SHIFT 29
#define CFG_S2MM_PKG_MASK BIT(28)
+#define AUD_CTRL_DATA_WIDTH_MASK GENMASK(18, 16)
#define AUD_CTRL_DATA_WIDTH_SHIFT 16
+#define AUD_CTRL_ACTIVE_CH_MASK GENMASK(22, 19)
#define AUD_CTRL_ACTIVE_CH_SHIFT 19
#define PERIOD_CFG_PERIODS_SHIFT 16
@@ -82,7 +86,12 @@ struct xlnx_pcm_drv_data {
int mm2s_irq;
struct snd_pcm_substream *play_stream;
struct snd_pcm_substream *capture_stream;
+ struct platform_device *pdev;
+ struct device_node *nodes[XLNX_MAX_PATHS];
struct clk *axi_clk;
+ struct clk *mm2s_axis_clk;
+ struct clk *s2mm_axis_clk;
+ struct clk *aud_mclk;
};
/*
@@ -430,11 +439,13 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
u32 aes_reg1_val, aes_reg2_val;
int status;
u64 size;
+ struct pl_card_data *prv;
struct snd_soc_pcm_runtime *prtd = substream->private_data;
struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
active_ch = params_channels(params);
if (active_ch > stream_data->ch_limit)
@@ -467,6 +478,7 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ val &= ~AUD_CTRL_DATA_WIDTH_MASK;
bits_per_sample = params_width(params);
switch (bits_per_sample) {
case 8:
@@ -488,6 +500,7 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ val &= ~AUD_CTRL_ACTIVE_CH_MASK;
val |= active_ch << AUD_CTRL_ACTIVE_CH_SHIFT;
writel(val, stream_data->mmio + XLNX_AUD_CTRL);
@@ -497,6 +510,12 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
bytes_per_ch = DIV_ROUND_UP(params_period_bytes(params), active_ch);
writel(bytes_per_ch, stream_data->mmio + XLNX_BYTES_PER_CH);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ prv = snd_soc_card_get_drvdata(prtd->card);
+ writel(prv->mclk_ratio,
+ stream_data->mmio + XLNX_AUD_FS_MULTIPLIER);
+ }
+
return 0;
}
@@ -559,10 +578,146 @@ static const struct snd_soc_component_driver xlnx_asoc_component = {
.pcm_new = xlnx_formatter_pcm_new,
};
+static int configure_mm2s(struct xlnx_pcm_drv_data *aud_drv_data,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data->mm2s_axis_clk = devm_clk_get(dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(aud_drv_data->mm2s_axis_clk)) {
+ ret = PTR_ERR(aud_drv_data->mm2s_axis_clk);
+ dev_err(dev, "failed to get m_axis_mm2s_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->mm2s_axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable m_axis_mm2s_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ aud_drv_data->aud_mclk = devm_clk_get(dev, "aud_mclk");
+ if (IS_ERR(aud_drv_data->aud_mclk)) {
+ ret = PTR_ERR(aud_drv_data->aud_mclk);
+ dev_err(dev, "failed to get aud_mclk(%d)\n", ret);
+ goto axis_clk_err;
+ }
+ ret = clk_prepare_enable(aud_drv_data->aud_mclk);
+ if (ret) {
+ dev_err(dev, "failed to enable aud_mclk(%d)\n", ret);
+ goto axis_clk_err;
+ }
+
+ aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
+ "irq_mm2s");
+ if (aud_drv_data->mm2s_irq < 0) {
+ dev_err(dev, "xlnx audio mm2s irq resource failed\n");
+ ret = aud_drv_data->mm2s_irq;
+ goto mm2s_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
+ xlnx_mm2s_irq_handler, 0,
+ "xlnx_formatter_pcm_mm2s_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio mm2s irq request failed\n");
+ goto mm2s_err;
+ }
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto mm2s_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET,
+ SNDRV_PCM_STREAM_PLAYBACK);
+
+ aud_drv_data->nodes[XLNX_PLAYBACK] =
+ of_parse_phandle(dev->of_node, "xlnx,tx", 0);
+ if (!aud_drv_data->nodes[XLNX_PLAYBACK])
+ dev_err(dev, "tx node not found\n");
+ else
+ dev_info(dev,
+ "sound card device will use DAI link: %s\n",
+ (aud_drv_data->nodes[XLNX_PLAYBACK])->name);
+ of_node_put(aud_drv_data->nodes[XLNX_PLAYBACK]);
+
+ aud_drv_data->mm2s_presence = true;
+ return 0;
+
+mm2s_err:
+ clk_disable_unprepare(aud_drv_data->aud_mclk);
+axis_clk_err:
+ clk_disable_unprepare(aud_drv_data->mm2s_axis_clk);
+
+ return ret;
+}
+
+static int configure_s2mm(struct xlnx_pcm_drv_data *aud_drv_data,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data->s2mm_axis_clk = devm_clk_get(dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(aud_drv_data->s2mm_axis_clk)) {
+ ret = PTR_ERR(aud_drv_data->s2mm_axis_clk);
+ dev_err(dev, "failed to get s_axis_s2mm_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->s2mm_axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable s_axis_s2mm_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev, "irq_s2mm");
+ if (aud_drv_data->s2mm_irq < 0) {
+ dev_err(dev, "xlnx audio s2mm irq resource failed\n");
+ ret = aud_drv_data->s2mm_irq;
+ goto s2mm_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
+ xlnx_s2mm_irq_handler, 0,
+ "xlnx_formatter_pcm_s2mm_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio s2mm irq request failed\n");
+ goto s2mm_err;
+ }
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto s2mm_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ aud_drv_data->nodes[XLNX_CAPTURE] =
+ of_parse_phandle(dev->of_node, "xlnx,rx", 0);
+ if (!aud_drv_data->nodes[XLNX_CAPTURE])
+ dev_err(dev, "rx node not found\n");
+ else
+ dev_info(dev, "sound card device will use DAI link: %s\n",
+ (aud_drv_data->nodes[XLNX_CAPTURE])->name);
+ of_node_put(aud_drv_data->nodes[XLNX_CAPTURE]);
+
+ aud_drv_data->s2mm_presence = true;
+ return 0;
+
+s2mm_err:
+ clk_disable_unprepare(aud_drv_data->s2mm_axis_clk);
+ return ret;
+}
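+
+/*
+ * Illustrative device tree sketch for the two helpers above (node name and
+ * phandle targets are hypothetical; the clock, interrupt and xlnx,tx/xlnx,rx
+ * property names are the ones requested by the code, other properties are
+ * elided):
+ *
+ *	audio-formatter@80010000 {
+ *		...
+ *		clock-names = "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk";
+ *		interrupt-names = "irq_mm2s", "irq_s2mm";
+ *		xlnx,tx = <&audio_sink>;
+ *		xlnx,rx = <&audio_source>;
+ *	};
+ */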
+
static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
{
int ret;
u32 val;
+ size_t pdata_size;
struct xlnx_pcm_drv_data *aud_drv_data;
struct resource *res;
struct device *dev = &pdev->dev;
@@ -599,59 +754,15 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
val = readl(aud_drv_data->mmio + XLNX_AUD_CORE_CONFIG);
if (val & AUD_CFG_MM2S_MASK) {
- aud_drv_data->mm2s_presence = true;
- ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
- XLNX_MM2S_OFFSET);
- if (ret) {
- dev_err(dev, "audio formatter reset failed\n");
- goto clk_err;
- }
- xlnx_formatter_disable_irqs(aud_drv_data->mmio +
- XLNX_MM2S_OFFSET,
- SNDRV_PCM_STREAM_PLAYBACK);
-
- aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
- "irq_mm2s");
- if (aud_drv_data->mm2s_irq < 0) {
- dev_err(dev, "xlnx audio mm2s irq resource failed\n");
- ret = aud_drv_data->mm2s_irq;
+ ret = configure_mm2s(aud_drv_data, pdev);
+ if (ret)
goto clk_err;
- }
- ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
- xlnx_mm2s_irq_handler, 0,
- "xlnx_formatter_pcm_mm2s_irq", dev);
- if (ret) {
- dev_err(dev, "xlnx audio mm2s irq request failed\n");
- goto clk_err;
- }
}
+
if (val & AUD_CFG_S2MM_MASK) {
- aud_drv_data->s2mm_presence = true;
- ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
- XLNX_S2MM_OFFSET);
- if (ret) {
- dev_err(dev, "audio formatter reset failed\n");
+ ret = configure_s2mm(aud_drv_data, pdev);
+ if (ret)
goto clk_err;
- }
- xlnx_formatter_disable_irqs(aud_drv_data->mmio +
- XLNX_S2MM_OFFSET,
- SNDRV_PCM_STREAM_CAPTURE);
-
- aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev,
- "irq_s2mm");
- if (aud_drv_data->s2mm_irq < 0) {
- dev_err(dev, "xlnx audio s2mm irq resource failed\n");
- ret = aud_drv_data->s2mm_irq;
- goto clk_err;
- }
- ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
- xlnx_s2mm_irq_handler, 0,
- "xlnx_formatter_pcm_s2mm_irq",
- dev);
- if (ret) {
- dev_err(dev, "xlnx audio s2mm irq request failed\n");
- goto clk_err;
- }
}
dev_set_drvdata(dev, aud_drv_data);
@@ -663,6 +774,21 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
goto clk_err;
}
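+	/*
+	 * If at least one stream is present, create a child device for the
+	 * generic PL sound card driver and hand it the DAI link nodes as
+	 * platform data.
+	 */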
+ pdata_size = sizeof(aud_drv_data->nodes);
+ if (aud_drv_data->nodes[XLNX_PLAYBACK] ||
+ aud_drv_data->nodes[XLNX_CAPTURE])
+ aud_drv_data->pdev =
+ platform_device_register_resndata(dev,
+ "xlnx_snd_card",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0,
+ &aud_drv_data->nodes,
+ pdata_size);
+	if (IS_ERR(aud_drv_data->pdev)) {
+		dev_err(dev, "sound card device creation failed\n");
+		ret = PTR_ERR(aud_drv_data->pdev);
+		goto clk_err;
+	}
+ dev_info(dev, "pcm platform device registered\n");
return 0;
clk_err:
@@ -675,6 +801,8 @@ static int xlnx_formatter_pcm_remove(struct platform_device *pdev)
int ret = 0;
struct xlnx_pcm_drv_data *adata = dev_get_drvdata(&pdev->dev);
+ platform_device_unregister(adata->pdev);
+
if (adata->s2mm_presence)
ret = xlnx_formatter_pcm_reset(adata->mmio + XLNX_S2MM_OFFSET);
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index 8b353166ad44..b1a51b28a836 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -7,6 +7,7 @@
// Author: Praveen Vuppala <praveenv@xilinx.com>
// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -22,15 +23,22 @@
#define I2S_CH0_OFFSET 0x30
#define I2S_I2STIM_VALID_MASK GENMASK(7, 0)
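+/*
+ * Per-device state: the AXI control clock drives the register interface,
+ * the AXI-Stream clock carries the audio samples, and aud_mclk is the
+ * audio master clock.
+ */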
+struct xlnx_i2s_dev_data {
+ void __iomem *base;
+ struct clk *axi_clk;
+ struct clk *axis_clk;
+ struct clk *aud_mclk;
+};
+
static int xlnx_i2s_set_sclkout_div(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
- void __iomem *base = snd_soc_dai_get_drvdata(cpu_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(cpu_dai);
if (!div || (div & ~I2S_I2STIM_VALID_MASK))
return -EINVAL;
- writel(div, base + I2S_I2STIM_OFFSET);
+ writel(div, dev_data->base + I2S_I2STIM_OFFSET);
return 0;
}
@@ -40,13 +48,13 @@ static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *i2s_dai)
{
u32 reg_off, chan_id;
- void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(i2s_dai);
chan_id = params_channels(params) / 2;
while (chan_id > 0) {
reg_off = I2S_CH0_OFFSET + ((chan_id - 1) * 4);
- writel(chan_id, base + reg_off);
+ writel(chan_id, dev_data->base + reg_off);
chan_id--;
}
@@ -56,18 +64,18 @@ static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
static int xlnx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *i2s_dai)
{
- void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(i2s_dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- writel(1, base + I2S_CORE_CTRL_OFFSET);
+ writel(1, dev_data->base + I2S_CORE_CTRL_OFFSET);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- writel(0, base + I2S_CORE_CTRL_OFFSET);
+ writel(0, dev_data->base + I2S_CORE_CTRL_OFFSET);
break;
default:
return -EINVAL;
@@ -96,8 +104,8 @@ MODULE_DEVICE_TABLE(of, xlnx_i2s_of_match);
static int xlnx_i2s_probe(struct platform_device *pdev)
{
struct resource *res;
- void __iomem *base;
struct snd_soc_dai_driver *dai_drv;
+ struct xlnx_i2s_dev_data *dev_data;
int ret;
u32 ch, format, data_width;
struct device *dev = &pdev->dev;
@@ -107,10 +115,16 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
if (!dai_drv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ dev_data = devm_kzalloc(&pdev->dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->axi_clk = devm_clk_get(&pdev->dev, "s_axi_ctrl_aclk");
+ if (IS_ERR(dev_data->axi_clk)) {
+ ret = PTR_ERR(dev_data->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_ctrl_aclk(%d)\n", ret);
+ return ret;
+ }
ret = of_property_read_u32(node, "xlnx,num-channels", &ch);
if (ret < 0) {
@@ -143,6 +157,15 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
dai_drv->playback.channels_max = ch;
dai_drv->playback.rates = SNDRV_PCM_RATE_8000_192000;
dai_drv->ops = &xlnx_i2s_dai_ops;
+
+ dev_data->axis_clk = devm_clk_get(&pdev->dev,
+ "s_axis_aud_aclk");
+ if (IS_ERR(dev_data->axis_clk)) {
+ ret = PTR_ERR(dev_data->axis_clk);
+ dev_err(&pdev->dev,
+ "failed to get s_axis_aud_aclk(%d)\n", ret);
+ return ret;
+ }
} else if (of_device_is_compatible(node, "xlnx,i2s-receiver-1.0")) {
dai_drv->name = "xlnx_i2s_capture";
dai_drv->capture.stream_name = "Capture";
@@ -151,30 +174,94 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
dai_drv->capture.channels_max = ch;
dai_drv->capture.rates = SNDRV_PCM_RATE_8000_192000;
dai_drv->ops = &xlnx_i2s_dai_ops;
+
+ dev_data->axis_clk = devm_clk_get(&pdev->dev,
+ "m_axis_aud_aclk");
+ if (IS_ERR(dev_data->axis_clk)) {
+ ret = PTR_ERR(dev_data->axis_clk);
+ dev_err(&pdev->dev,
+ "failed to get m_axis_aud_aclk(%d)\n", ret);
+ return ret;
+ }
} else {
return -ENODEV;
}
- dev_set_drvdata(&pdev->dev, base);
+ dev_data->aud_mclk = devm_clk_get(&pdev->dev, "aud_mclk");
+ if (IS_ERR(dev_data->aud_mclk)) {
+ ret = PTR_ERR(dev_data->aud_mclk);
+ dev_err(&pdev->dev, "failed to get aud_mclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev_data->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_ctrl_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev_data->axis_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable axis_aud_aclk(%d)\n", ret);
+ goto err_axis_clk;
+ }
+
+ ret = clk_prepare_enable(dev_data->aud_mclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable aud_mclk(%d)\n", ret);
+ goto err_aud_mclk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev_data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev_data->base)) {
+ ret = PTR_ERR(dev_data->base);
+ goto clk_err;
+ }
+
+ dev_set_drvdata(&pdev->dev, dev_data);
ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_i2s_component,
dai_drv, 1);
if (ret) {
dev_err(&pdev->dev, "i2s component registration failed\n");
- return ret;
+ goto clk_err;
}
dev_info(&pdev->dev, "%s DAI registered\n", dai_drv->name);
+ return 0;
+clk_err:
+ clk_disable_unprepare(dev_data->aud_mclk);
+err_aud_mclk:
+ clk_disable_unprepare(dev_data->axis_clk);
+err_axis_clk:
+ clk_disable_unprepare(dev_data->axi_clk);
+
return ret;
}
+static int xlnx_i2s_remove(struct platform_device *pdev)
+{
+ struct xlnx_i2s_dev_data *dev_data = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(dev_data->aud_mclk);
+ clk_disable_unprepare(dev_data->axis_clk);
+ clk_disable_unprepare(dev_data->axi_clk);
+
+ return 0;
+}
+
static struct platform_driver xlnx_i2s_aud_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = xlnx_i2s_of_match,
},
.probe = xlnx_i2s_probe,
+ .remove = xlnx_i2s_remove,
};
module_platform_driver(xlnx_i2s_aud_driver);
diff --git a/sound/soc/xilinx/xlnx_pl_snd_card.c b/sound/soc/xilinx/xlnx_pl_snd_card.c
new file mode 100644
index 000000000000..2fa7c9ceb08d
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_pl_snd_card.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ASoC sound card support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "xlnx_snd_common.h"
+
+#define I2S_CLOCK_RATIO 384
+#define XLNX_MAX_PL_SND_DEV 5
+
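+/* allocates unique card instance numbers in [0, XLNX_MAX_PL_SND_DEV) */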
+static DEFINE_IDA(xlnx_snd_card_dev);
+
+enum {
+ I2S_AUDIO = 0,
+ HDMI_AUDIO,
+ SDI_AUDIO,
+ SPDIF_AUDIO,
+ XLNX_MAX_IFACE,
+};
+
+static const char *xlnx_snd_card_name[XLNX_MAX_IFACE] = {
+ [I2S_AUDIO] = "xlnx-i2s-snd-card",
+ [HDMI_AUDIO] = "xlnx-hdmi-snd-card",
+ [SDI_AUDIO] = "xlnx-sdi-snd-card",
+ [SPDIF_AUDIO] = "xlnx-spdif-snd-card",
+};
+
+static const char *dev_compat[][XLNX_MAX_IFACE] = {
+ [XLNX_PLAYBACK] = {
+ "xlnx,i2s-transmitter-1.0",
+ "xlnx,v-hdmi-tx-ss-3.1",
+ "xlnx,v-uhdsdi-audio-2.0",
+ "xlnx,spdif-2.0",
+ },
+
+ [XLNX_CAPTURE] = {
+ "xlnx,i2s-receiver-1.0",
+ "xlnx,v-hdmi-rx-ss-3.1",
+ "xlnx,v-uhdsdi-audio-2.0",
+ "xlnx,spdif-2.0",
+ },
+};
+
+static int xlnx_spdif_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+	/* mclk must be >= 1024 * sampling rate */
+ prv->mclk_val = 1024 * sample_rate;
+ prv->mclk_ratio = 1024;
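+	/* e.g. 48 kHz: mclk = 1024 * 48000 = 49.152 MHz */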
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_sdi_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_hdmi_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ prv->mclk_ratio = 512;
+ break;
+ default:
+ return -EINVAL;
+ }
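+	/* e.g. 44.1 kHz: mclk = 512 * 44100 = 22.5792 MHz */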
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_i2s_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int ret, clk_div;
+ u32 ch, data_width, sample_rate;
+ struct pl_card_data *prv;
+
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+ ch = params_channels(params);
+ data_width = params_width(params);
+ sample_rate = params_rate(params);
+
+ /* only 2 channels supported */
+ if (ch != 2)
+ return -EINVAL;
+
+ prv = snd_soc_card_get_drvdata(rtd->card);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ switch (sample_rate) {
+ case 5512:
+ case 8000:
+ case 11025:
+ case 16000:
+ case 22050:
+ case 32000:
+ case 44100:
+ case 48000:
+ case 64000:
+ case 88200:
+ case 96000:
+ prv->mclk_ratio = 384;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ prv->mclk_ratio = 384;
+ break;
+ case 64000:
+ case 176400:
+ case 192000:
+ prv->mclk_ratio = 192;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
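+	/*
+	 * e.g. 2 channels of 24-bit audio with mclk_ratio 384:
+	 * clk_div = DIV_ROUND_UP(384, 2 * 2 * 24) = 4
+	 */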
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ clk_div = DIV_ROUND_UP(prv->mclk_ratio, 2 * ch * data_width);
+ ret = snd_soc_dai_set_clkdiv(cpu_dai, 0, clk_div);
+ if (ret)
+ return ret;
+
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static const struct snd_soc_ops xlnx_sdi_card_ops = {
+ .hw_params = xlnx_sdi_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_i2s_card_ops = {
+ .hw_params = xlnx_i2s_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_hdmi_card_ops = {
+ .hw_params = xlnx_hdmi_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_spdif_card_ops = {
+ .hw_params = xlnx_spdif_card_hw_params,
+};
+
+static struct snd_soc_dai_link xlnx_snd_dai[][XLNX_MAX_PATHS] = {
+ [I2S_AUDIO] = {
+ {
+ .name = "xilinx-i2s_playback",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ops = &xlnx_i2s_card_ops,
+ },
+ {
+ .name = "xilinx-i2s_capture",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ops = &xlnx_i2s_card_ops,
+ },
+ },
+ [HDMI_AUDIO] = {
+ {
+ .name = "xilinx-hdmi-playback",
+ .codec_dai_name = "i2s-hifi",
+ .codec_name = "hdmi-audio-codec.0",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .ops = &xlnx_hdmi_card_ops,
+ },
+ {
+ .name = "xilinx-hdmi-capture",
+ .codec_dai_name = "xlnx_hdmi_rx",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ },
+ },
+ [SDI_AUDIO] = {
+ {
+ .name = "xlnx-sdi-playback",
+ .codec_dai_name = "xlnx_sdi_tx",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ .ops = &xlnx_sdi_card_ops,
+ },
+ {
+ .name = "xlnx-sdi-capture",
+ .codec_dai_name = "xlnx_sdi_rx",
+ .cpu_dai_name = "snd-soc-dummy-dai",
+ },
+
+ },
+ [SPDIF_AUDIO] = {
+ {
+ .name = "xilinx-spdif_playback",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ops = &xlnx_spdif_card_ops,
+ },
+ {
+ .name = "xilinx-spdif_capture",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .ops = &xlnx_spdif_card_ops,
+ },
+ },
+
+};
+
+static int find_link(struct device_node *node, int direction)
+{
+ int ret;
+ u32 i, size;
+ const char **link_names = dev_compat[direction];
+
+ size = ARRAY_SIZE(dev_compat[direction]);
+
+ for (i = 0; i < size; i++) {
+ ret = of_device_is_compatible(node, link_names[i]);
+ if (ret)
+ return i;
+ }
+ return -ENODEV;
+}
+
+static int xlnx_snd_probe(struct platform_device *pdev)
+{
+ u32 i;
+ size_t sz;
+ char *buf;
+ int ret, audio_interface;
+ struct snd_soc_dai_link *dai;
+ struct pl_card_data *prv;
+ struct platform_device *iface_pdev;
+
+ struct snd_soc_card *card;
+ struct device_node **node = pdev->dev.platform_data;
+
+ if (!node)
+ return -ENODEV;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(struct snd_soc_card),
+ GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = &pdev->dev;
+
+ card->dai_link = devm_kzalloc(card->dev,
+ sizeof(*dai) * XLNX_MAX_PATHS,
+ GFP_KERNEL);
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ prv = devm_kzalloc(card->dev,
+ sizeof(struct pl_card_data),
+ GFP_KERNEL);
+ if (!prv)
+ return -ENOMEM;
+
+ card->num_links = 0;
+ for (i = XLNX_PLAYBACK; i < XLNX_MAX_PATHS; i++) {
+ struct device_node *pnode = of_parse_phandle(node[i],
+ "xlnx,snd-pcm", 0);
+		if (!pnode) {
+			dev_err(card->dev, "platform node not found\n");
+			return -ENODEV;
+		}
+
+		/*
+		 * Checking either playback or capture is enough, as the
+		 * same clock is used for both.
+		 */
+ if (i == XLNX_PLAYBACK) {
+ iface_pdev = of_find_device_by_node(pnode);
+ if (!iface_pdev) {
+ of_node_put(pnode);
+ return -ENODEV;
+ }
+
+ prv->mclk = devm_clk_get(&iface_pdev->dev, "aud_mclk");
+ if (IS_ERR(prv->mclk))
+ return PTR_ERR(prv->mclk);
+
+ }
+		/* keep the pnode reference: dai->platform_of_node uses it below */
+
+ dai = &card->dai_link[i];
+ audio_interface = find_link(node[i], i);
+ switch (audio_interface) {
+ case I2S_AUDIO:
+ *dai = xlnx_snd_dai[I2S_AUDIO][i];
+ dai->platform_of_node = pnode;
+ dai->cpu_of_node = node[i];
+ card->num_links++;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case HDMI_AUDIO:
+ *dai = xlnx_snd_dai[HDMI_AUDIO][i];
+ dai->platform_of_node = pnode;
+ if (i == XLNX_CAPTURE)
+ dai->codec_of_node = node[i];
+ card->num_links++;
+ /* TODO: support multiple sampling rates */
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case SDI_AUDIO:
+ *dai = xlnx_snd_dai[SDI_AUDIO][i];
+ dai->platform_of_node = pnode;
+ dai->codec_of_node = node[i];
+ card->num_links++;
+ /* TODO: support multiple sampling rates */
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case SPDIF_AUDIO:
+ *dai = xlnx_snd_dai[SPDIF_AUDIO][i];
+ dai->platform_of_node = pnode;
+ dai->cpu_of_node = node[i];
+ card->num_links++;
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ default:
+ dev_err(card->dev, "Invalid audio interface\n");
+ return -ENODEV;
+ }
+ }
+
+ if (card->num_links) {
+ /*
+ * Example : i2s card name = xlnx-i2s-snd-card-0
+ * length = number of chars in "xlnx-i2s-snd-card"
+ * + 1 ('-'), + 1 (card instance num)
+ * + 1 ('\0')
+ */
+ sz = strlen(xlnx_snd_card_name[audio_interface]) + 3;
+ buf = devm_kzalloc(card->dev, sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ prv->xlnx_snd_dev_id = ida_simple_get(&xlnx_snd_card_dev, 0,
+ XLNX_MAX_PL_SND_DEV,
+ GFP_KERNEL);
+ if (prv->xlnx_snd_dev_id < 0)
+ return prv->xlnx_snd_dev_id;
+
+ snprintf(buf, sz, "%s-%d", xlnx_snd_card_name[audio_interface],
+ prv->xlnx_snd_dev_id);
+ card->name = buf;
+
+ ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(card->dev, "%s registration failed\n",
+ card->name);
+ ida_simple_remove(&xlnx_snd_card_dev,
+ prv->xlnx_snd_dev_id);
+ return ret;
+ }
+
+ dev_set_drvdata(card->dev, prv);
+ dev_info(card->dev, "%s registered\n", card->name);
+ }
+
+ return 0;
+}
+
+static int xlnx_snd_remove(struct platform_device *pdev)
+{
+ struct pl_card_data *pdata = dev_get_drvdata(&pdev->dev);
+
+ ida_simple_remove(&xlnx_snd_card_dev, pdata->xlnx_snd_dev_id);
+ return 0;
+}
+
+static struct platform_driver xlnx_snd_driver = {
+ .driver = {
+ .name = "xlnx_snd_card",
+ },
+ .probe = xlnx_snd_probe,
+ .remove = xlnx_snd_remove,
+};
+
+module_platform_driver(xlnx_snd_driver);
+
+MODULE_DESCRIPTION("Xilinx FPGA sound card driver");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_sdi_audio.c b/sound/soc/xilinx/xlnx_sdi_audio.c
new file mode 100644
index 000000000000..75b0b3150b6f
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_sdi_audio.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx SDI embed and extract audio support
+ *
+ * Copyright (c) 2018 Xilinx Pvt., Ltd
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <drm/drm_modes.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define DRIVER_NAME "xlnx-sdi-audio"
+
+#define XSDIAUD_CNTRL_REG_OFFSET 0x00
+#define XSDIAUD_SOFT_RST_REG_OFFSET 0x04
+#define XSDIAUD_VER_REG_OFFSET 0x08
+#define XSDIAUD_INT_EN_REG_OFFSET 0x0C
+#define XSDIAUD_INT_STS_REG_OFFSET 0x10
+#define XSDIAUD_EMB_VID_CNTRL_REG_OFFSET 0x14
+#define XSDIAUD_AUD_CNTRL_REG_OFFSET 0x18
+#define XSDIAUD_CH_VALID_REG_OFFSET 0x20
+#define XSDIAUD_CH_MUTE_REG_OFFSET 0x30
+#define XSDIAUD_ACTIVE_GRP_REG_OFFSET 0x40
+#define XSDIAUD_EXT_CH_STAT0_REG_OFFSET 0x48
+#define XSDIAUD_EXT_SRATE_STS_REG_OFFSET 0x70
+#define XSDIAUD_GUI_PARAM_REG_OFFSET 0xFC
+
+#define XSDIAUD_CNTRL_EN_MASK BIT(0)
+#define XSDIAUD_SOFT_RST_CONFIG_MASK BIT(0)
+#define XSDIAUD_SOFT_RST_CORE_MASK BIT(1)
+#define XSDIAUD_VER_MAJOR_MASK GENMASK(31, 24)
+#define XSDIAUD_VER_MINOR_MASK GENMASK(23, 16)
+
+#define XSDIAUD_EXT_GROUP_1_STS_MASK BIT(0)
+#define XSDIAUD_EXT_AUDSTS_UPDATE_MASK BIT(8)
+#define XSDIAUD_EMB_VID_CNT_ELE_SHIFT (16)
+#define XSDIAUD_EMB_VID_CNT_ELE_MASK BIT(16)
+#define XSDIAUD_EMB_VID_CNT_TSCAN_MASK BIT(8)
+#define XSDIAUD_EMB_VID_CNT_TSCAN_SHIFT (8)
+#define XSDIAUD_EMB_VID_CNT_TRATE_SHIFT (4)
+#define XSDIAUD_EMB_AUD_CNT_SS_MASK BIT(3)
+#define XSDIAUD_EMB_AUD_CNT_ASYNC_AUDIO BIT(4)
+
+#define CH_STATUS_UPDATE_TIMEOUT 40
+
+enum ip_mode {
+ EMBED,
+ EXTRACT,
+};
+
+enum channel_id {
+ CHAN_ID_0 = 1,
+ CHAN_ID_1,
+};
+
+enum sdi_transport_family {
+ SDI_TRANSPORT_FAMILY_1920,
+ SDI_TRANSPORT_FAMILY_1280,
+ SDI_TRANSPORT_FAMILY_2048,
+ SDI_TRANSPORT_FAMILY_NTSC = 8,
+ SDI_TRANSPORT_FAMILY_PAL = 9,
+};
+
+/**
+ * enum sdi_audio_samplerate - audio sampling rate
+ * @XSDIAUD_SAMPRATE0: 48 kHz
+ * @XSDIAUD_SAMPRATE1: 44.1 kHz
+ * @XSDIAUD_SAMPRATE2: 32 kHz
+ */
+enum sdi_audio_samplerate {
+ XSDIAUD_SAMPRATE0,
+ XSDIAUD_SAMPRATE1,
+ XSDIAUD_SAMPRATE2
+};
+
+/**
+ * enum sdi_audio_samplesize - bits per sample
+ * @XSDIAUD_SAMPSIZE0: 20-bit audio sample
+ * @XSDIAUD_SAMPSIZE1: 24-bit audio sample
+ */
+enum sdi_audio_samplesize {
+ XSDIAUD_SAMPSIZE0,
+ XSDIAUD_SAMPSIZE1
+};
+
+struct dev_ctx {
+	enum ip_mode mode;
+ void __iomem *base;
+ struct device *dev;
+ struct drm_display_mode *video_mode;
+ struct snd_pcm_substream *stream;
+ struct clk *axi_clk;
+ struct clk *axis_clk;
+ struct clk *aud_clk;
+ bool rx_srate_updated;
+ wait_queue_head_t srate_q;
+};
+
+static irqreturn_t xtract_irq_handler(int irq, void *dev_id)
+{
+ u32 irq_sts, irq_en, active_grps;
+ struct dev_ctx *ctx = dev_id;
+
+ irq_sts = readl(ctx->base + XSDIAUD_INT_STS_REG_OFFSET);
+ active_grps = readl(ctx->base + XSDIAUD_ACTIVE_GRP_REG_OFFSET);
+ if ((irq_sts & XSDIAUD_EXT_AUDSTS_UPDATE_MASK) &&
+ (active_grps & XSDIAUD_EXT_GROUP_1_STS_MASK)) {
+ writel(XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_STS_REG_OFFSET);
+ irq_en = readl(ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+		/* sample rate status was updated; disable further interrupts */
+ writel(irq_en & ~XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+
+ ctx->rx_srate_updated = true;
+ wake_up_interruptible(&ctx->srate_q);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void audio_enable(void __iomem *aud_base)
+{
+ u32 val;
+
+ val = readl(aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+ val |= XSDIAUD_CNTRL_EN_MASK;
+ writel(val, aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+}
+
+static void audio_disable(void __iomem *aud_base)
+{
+ u32 val;
+
+ val = readl(aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+ val &= ~XSDIAUD_CNTRL_EN_MASK;
+ writel(val, aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+}
+
+static void audio_reset_core(void __iomem *aud_base, bool reset)
+{
+ u32 val;
+
+ if (reset) {
+ /* reset the core */
+ val = readl(aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ val |= XSDIAUD_SOFT_RST_CORE_MASK;
+ writel(val, aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ } else {
+ /* bring the core out of reset */
+ val = readl(aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ val &= ~XSDIAUD_SOFT_RST_CORE_MASK;
+ writel(val, aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ }
+}
+
+static int xlnx_sdi_rx_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int err;
+ u32 val, sample_rate;
+
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+	unsigned long timeout = msecs_to_jiffies(CH_STATUS_UPDATE_TIMEOUT);
+
+ audio_enable(base);
+ writel(XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+	err = wait_event_interruptible_timeout(ctx->srate_q,
+					       ctx->rx_srate_updated,
+					       timeout);
+
+	if (err <= 0) {
+		dev_err(ctx->dev, "didn't get valid audio property update\n");
+		return err ? err : -EINVAL;
+	}
+ ctx->rx_srate_updated = false;
+
+ val = readl(base + XSDIAUD_EXT_SRATE_STS_REG_OFFSET);
+	/* both channels carry the same sample rate, so read either one */
+	switch (val & CHAN_ID_0) {
+	case 0:
+		sample_rate = 48000;
+		break;
+	case 1:
+		sample_rate = 44100;
+		break;
+	case 2:
+		sample_rate = 32000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+ dev_dbg(ctx->dev,
+ "sdi rx audio enabled : sample rate = %d\n", sample_rate);
+ return 0;
+}
+
+static void xlnx_sdi_rx_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+
+ audio_disable(ctx->base);
+
+	dev_info(dai->dev, "sdi rx audio disabled\n");
+}
+
+static int xlnx_sdi_tx_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+
+ audio_enable(ctx->base);
+ ctx->stream = substream;
+
+	dev_info(ctx->dev, "sdi tx audio enabled\n");
+ return 0;
+}
+
+static int xlnx_sdi_tx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u32 val = 0;
+ u32 num_channels, sample_rate, sig_bits;
+
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+
+	/*
+	 * The video mode properties needed by the audio driver are shared
+	 * through a pointer in the video device's platform data and used
+	 * here. This coupling may need to be reworked to avoid likely
+	 * error scenarios.
+	 */
+ if (!ctx->video_mode || !ctx->video_mode->vdisplay ||
+ !ctx->video_mode->vrefresh) {
+ dev_err(ctx->dev, "couldn't find video display properties\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map video properties.
+ * Note: 1920x1080 and 2048x1080 are the resolutions of sub images for
+ * 3840x2160 and 4096x2160 resolutions respectively.
+ */
+ switch (ctx->video_mode->hdisplay) {
+ case 1920:
+ case 3840:
+ val = SDI_TRANSPORT_FAMILY_1920;
+ break;
+ case 1280:
+ val |= SDI_TRANSPORT_FAMILY_1280;
+ break;
+ case 2048:
+ case 4096:
+ val |= SDI_TRANSPORT_FAMILY_2048;
+ break;
+ case 720:
+ if (ctx->video_mode->vdisplay == 486)
+ val |= SDI_TRANSPORT_FAMILY_NTSC;
+ else if (ctx->video_mode->vdisplay == 576)
+ val |= SDI_TRANSPORT_FAMILY_PAL;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
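+	/* map the video refresh rate to the core's transport-rate code */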
+ switch (ctx->video_mode->vrefresh) {
+ case 24:
+ val |= (3 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 25:
+ val |= (5 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 30:
+ val |= (7 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 48:
+ val |= (8 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 50:
+ val |= (9 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 60:
+ val |= (11 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!(ctx->video_mode->flags & DRM_MODE_FLAG_INTERLACE))
+ val |= XSDIAUD_EMB_VID_CNT_TSCAN_MASK;
+
+ val |= XSDIAUD_EMB_VID_CNT_ELE_MASK;
+
+ writel(val, base + XSDIAUD_EMB_VID_CNTRL_REG_OFFSET);
+
+ /* map audio properties */
+ num_channels = params_channels(params);
+ sample_rate = params_rate(params);
+ sig_bits = snd_pcm_format_width(params_format(params));
+
+ dev_info(ctx->dev,
+ "stream params: channels = %d sample_rate = %d bits = %d\n",
+ num_channels, sample_rate, sig_bits);
+
+ val = 0;
+ val |= XSDIAUD_EMB_AUD_CNT_ASYNC_AUDIO;
+
+ switch (sample_rate) {
+ case 48000:
+ val |= XSDIAUD_SAMPRATE0;
+ break;
+ case 44100:
+ val |= XSDIAUD_SAMPRATE1;
+ break;
+ case 32000:
+ val |= XSDIAUD_SAMPRATE2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sig_bits == 24)
+ val |= XSDIAUD_EMB_AUD_CNT_SS_MASK;
+
+ writel(val, base + XSDIAUD_AUD_CNTRL_REG_OFFSET);
+
+ /* TODO: support more channels, currently only 2. */
+ writel(CHAN_ID_1 | CHAN_ID_0, base + XSDIAUD_CH_VALID_REG_OFFSET);
+
+ return 0;
+}
+
+static void xlnx_sdi_tx_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+
+ audio_disable(base);
+ ctx->stream = NULL;
+
+	dev_info(ctx->dev, "sdi tx audio disabled\n");
+}
+
+static const struct snd_soc_component_driver xlnx_sdi_component = {
+ .name = "xlnx-sdi-dai-component",
+};
+
+static const struct snd_soc_dai_ops xlnx_sdi_rx_dai_ops = {
+ .startup = xlnx_sdi_rx_pcm_startup,
+ .shutdown = xlnx_sdi_rx_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver xlnx_sdi_rx_dai = {
+ .name = "xlnx_sdi_rx",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &xlnx_sdi_rx_dai_ops,
+};
+
+static const struct snd_soc_dai_ops xlnx_sdi_tx_dai_ops = {
+ .startup = xlnx_sdi_tx_pcm_startup,
+ .hw_params = xlnx_sdi_tx_hw_params,
+ .shutdown = xlnx_sdi_tx_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver xlnx_sdi_tx_dai = {
+ .name = "xlnx_sdi_tx",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &xlnx_sdi_tx_dai_ops,
+};
+
+static int xlnx_sdi_audio_probe(struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+ struct dev_ctx *ctx;
+ struct resource *res;
+ struct device *video_dev;
+ struct device_node *video_node;
+ struct platform_device *video_pdev;
+ struct snd_soc_dai_driver *snd_dai;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ /* TODO - remove before upstreaming */
+ if (of_device_is_compatible(node, "xlnx,v-uhdsdi-audio-1.0")) {
+ dev_err(&pdev->dev, "driver doesn't support sdi audio v1.0\n");
+ return -ENODEV;
+ }
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(struct dev_ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+ ctx->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(ctx->axi_clk)) {
+ ret = PTR_ERR(ctx->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No IO MEM resource found\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+	ctx->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ctx->base)) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = PTR_ERR(ctx->base);
+		goto err_axis;
+	}
+
+ ctx->dev = &pdev->dev;
+
+ val = readl(ctx->base + XSDIAUD_GUI_PARAM_REG_OFFSET);
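+	/* BIT(6) of the GUI parameter register: set = extractor, clear = embedder */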
+ if (val & BIT(6)) {
+ ctx->mode = EXTRACT;
+
+ ctx->axis_clk = devm_clk_get(&pdev->dev, "m_axis_clk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(&pdev->dev, "failed to get m_axis_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ ctx->aud_clk = devm_clk_get(&pdev->dev, "sdi_extract_clk");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+ dev_err(&pdev->dev, "failed to get sdi_extract_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No IRQ resource found\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+ ret = devm_request_irq(&pdev->dev, res->start,
+ xtract_irq_handler,
+ 0, "XLNX_SDI_AUDIO_XTRACT", ctx);
+ if (ret) {
+ dev_err(&pdev->dev, "extract irq request failed\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ init_waitqueue_head(&ctx->srate_q);
+
+ snd_dai = &xlnx_sdi_rx_dai;
+ } else {
+ ctx->mode = EMBED;
+ ctx->axis_clk = devm_clk_get(&pdev->dev, "s_axis_clk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(&pdev->dev, "failed to get s_axis_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ ctx->aud_clk = devm_clk_get(&pdev->dev, "sdi_embed_clk");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+			dev_err(&pdev->dev, "failed to get sdi_embed_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ video_node = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+		if (!video_node) {
+			dev_err(ctx->dev, "video node not found\n");
+			ret = -ENODEV;
+			goto err_axis;
+		}
+
+ video_pdev = of_find_device_by_node(video_node);
+ if (!video_pdev) {
+ of_node_put(video_node);
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ video_dev = &video_pdev->dev;
+ ctx->video_mode =
+ (struct drm_display_mode *)video_dev->platform_data;
+ /* invalid 'platform_data' implies video driver is not loaded */
+ if (!ctx->video_mode) {
+ of_node_put(video_node);
+ ret = -EPROBE_DEFER;
+ goto err_axis;
+ }
+
+ snd_dai = &xlnx_sdi_tx_dai;
+ of_node_put(video_node);
+ }
+
+ ret = clk_prepare_enable(ctx->axis_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+			"failed to enable axis clock(%d)\n", ret);
+ goto err_axis;
+ }
+
+ ret = clk_prepare_enable(ctx->aud_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+			"failed to enable audio clock(%d)\n", ret);
+ goto err_aud_clk;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_sdi_component,
+ snd_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register codec DAI\n");
+ goto err_clk;
+ }
+
+ dev_set_drvdata(&pdev->dev, ctx);
+
+ audio_reset_core(ctx->base, true);
+ audio_reset_core(ctx->base, false);
+
+ dev_info(&pdev->dev, "xlnx sdi codec dai component registered\n");
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ctx->aud_clk);
+err_aud_clk:
+ clk_disable_unprepare(ctx->axis_clk);
+err_axis:
+ clk_disable_unprepare(ctx->axi_clk);
+ return ret;
+}
+
+static int xlnx_sdi_audio_remove(struct platform_device *pdev)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(&pdev->dev);
+
+ audio_disable(ctx->base);
+ audio_reset_core(ctx->base, true);
+
+ clk_disable_unprepare(ctx->aud_clk);
+ clk_disable_unprepare(ctx->axis_clk);
+ clk_disable_unprepare(ctx->axi_clk);
+ return 0;
+}
+
+static const struct of_device_id xlnx_sdi_audio_of_match[] = {
+ { .compatible = "xlnx,v-uhdsdi-audio-1.0"},
+ { .compatible = "xlnx,v-uhdsdi-audio-2.0"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_audio_of_match);
+
+static struct platform_driver xlnx_sdi_audio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnx_sdi_audio_of_match,
+ },
+ .probe = xlnx_sdi_audio_probe,
+ .remove = xlnx_sdi_audio_remove,
+};
+
+module_platform_driver(xlnx_sdi_audio_driver);
+
+MODULE_DESCRIPTION("xilinx sdi audio codec driver");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_snd_common.h b/sound/soc/xilinx/xlnx_snd_common.h
new file mode 100644
index 000000000000..39461fac0d96
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_snd_common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx ASoC sound card support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _XLNX_SND_COMMON_H
+#define _XLNX_SND_COMMON_H
+
+enum {
+ XLNX_PLAYBACK,
+ XLNX_CAPTURE,
+ XLNX_MAX_PATHS
+};
+
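+/*
+ * struct pl_card_data - per-card state shared with the hw_params callbacks
+ * @mclk_val: master clock rate in Hz requested for the current stream
+ * @mclk_ratio: ratio of mclk to the audio sample rate
+ * @xlnx_snd_dev_id: IDA-assigned card instance number
+ * @mclk: master audio clock of the audio interface
+ */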
+struct pl_card_data {
+ u32 mclk_val;
+ u32 mclk_ratio;
+ int xlnx_snd_dev_id;
+ struct clk *mclk;
+};
+#endif /* _XLNX_SND_COMMON_H */